2024-11-21 00:26:52,864 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-11-21 00:26:52,885 main DEBUG Took 0.018632 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-21 00:26:52,886 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-21 00:26:52,886 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-21 00:26:52,888 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-21 00:26:52,889 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,900 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-21 00:26:52,919 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,921 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,922 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,923 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,924 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,924 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,926 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,926 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,927 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,928 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,929 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,930 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,931 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,931 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,932 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,932 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,933 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,933 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,934 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,934 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,935 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,936 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,936 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,937 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 00:26:52,937 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,938 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-21 00:26:52,940 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 00:26:52,941 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-21 00:26:52,944 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-21 00:26:52,945 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-21 00:26:52,946 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-21 00:26:52,947 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-21 00:26:52,958 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-21 00:26:52,961 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-21 00:26:52,964 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-21 00:26:52,965 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-21 00:26:52,965 main DEBUG createAppenders(={Console})
2024-11-21 00:26:52,967 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized
2024-11-21 00:26:52,967 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-11-21 00:26:52,967 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK.
2024-11-21 00:26:52,968 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-21 00:26:52,968 main DEBUG OutputStream closed
2024-11-21 00:26:52,969 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-21 00:26:52,969 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-21 00:26:52,969 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK
2024-11-21 00:26:53,088 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-21 00:26:53,091 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-21 00:26:53,093 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-21 00:26:53,095 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-21 00:26:53,096 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-21 00:26:53,096 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-21 00:26:53,097 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-21 00:26:53,097 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-21 00:26:53,098 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-21 00:26:53,098 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-21 00:26:53,099 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-21 00:26:53,099 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-21 00:26:53,100 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-21 00:26:53,101 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-21 00:26:53,101 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-21 00:26:53,102 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-21 00:26:53,102 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-21 00:26:53,103 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-21 00:26:53,107 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-21 00:26:53,107 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null
2024-11-21 00:26:53,108 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-21 00:26:53,109 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK.
2024-11-21T00:26:53,475 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56
2024-11-21 00:26:53,479 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-21 00:26:53,480 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-21T00:26:53,492 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins
2024-11-21T00:26:53,534 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-21T00:26:53,538 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552, deleteOnExit=true
2024-11-21T00:26:53,538 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-11-21T00:26:53,539 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/test.cache.data in system properties and HBase conf
2024-11-21T00:26:53,540 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/hadoop.tmp.dir in system properties and HBase conf
2024-11-21T00:26:53,541 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/hadoop.log.dir in system properties and HBase conf
2024-11-21T00:26:53,542 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-21T00:26:53,543 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-21T00:26:53,544 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-11-21T00:26:53,674 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-21T00:26:53,845 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-21T00:26:53,855 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-21T00:26:53,856 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-21T00:26:53,856 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-21T00:26:53,857 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-21T00:26:53,858 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-21T00:26:53,859 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-21T00:26:53,860 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-21T00:26:53,860 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-21T00:26:53,861 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-21T00:26:53,862 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/nfs.dump.dir in system properties and HBase conf
2024-11-21T00:26:53,862 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/java.io.tmpdir in system properties and HBase conf
2024-11-21T00:26:53,863 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-21T00:26:53,864 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-21T00:26:53,864 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-21T00:26:55,111 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-21T00:26:55,218 INFO [Time-limited test {}] log.Log(170): Logging initialized @3375ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-21T00:26:55,324 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-21T00:26:55,400 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-21T00:26:55,419 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-21T00:26:55,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-21T00:26:55,421 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-21T00:26:55,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-21T00:26:55,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/hadoop.log.dir/,AVAILABLE}
2024-11-21T00:26:55,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-21T00:26:55,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f0d4558{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/java.io.tmpdir/jetty-localhost-42153-hadoop-hdfs-3_4_1-tests_jar-_-any-8841154849344358765/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-21T00:26:55,646 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:42153}
2024-11-21T00:26:55,646 INFO [Time-limited test {}] server.Server(415): Started @3804ms
2024-11-21T00:26:56,107 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-21T00:26:56,115 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-21T00:26:56,116 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-21T00:26:56,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-21T00:26:56,117 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-21T00:26:56,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57582772{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/hadoop.log.dir/,AVAILABLE}
2024-11-21T00:26:56,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63d4d645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-21T00:26:56,221 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bd2e890{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/java.io.tmpdir/jetty-localhost-37819-hadoop-hdfs-3_4_1-tests_jar-_-any-11865351649156704506/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-21T00:26:56,221 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d3fa6ef{HTTP/1.1, (http/1.1)}{localhost:37819}
2024-11-21T00:26:56,222 INFO [Time-limited test {}] server.Server(415): Started @4380ms
2024-11-21T00:26:56,269 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-21T00:26:57,188 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552/dfs/data/data1/current/BP-364493626-172.17.0.2-1732148814546/current, will proceed with Du for space computation calculation,
2024-11-21T00:26:57,188 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552/dfs/data/data2/current/BP-364493626-172.17.0.2-1732148814546/current, will proceed with Du for space computation calculation,
2024-11-21T00:26:57,240 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-21T00:26:57,286 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe23b72eb967f906f with lease ID 0x66a1625cc68c85c4: Processing first storage report for DS-a61f25bc-f3dc-47fa-8c68-22e3217fe1b3 from datanode DatanodeRegistration(127.0.0.1:39227, datanodeUuid=a8cc0fd4-480a-47ac-9b45-4eb618d27036, infoPort=45459, infoSecurePort=0, ipcPort=38767, storageInfo=lv=-57;cid=testClusterID;nsid=1683673958;c=1732148814546)
2024-11-21T00:26:57,287 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe23b72eb967f906f with lease ID 0x66a1625cc68c85c4: from storage DS-a61f25bc-f3dc-47fa-8c68-22e3217fe1b3 node DatanodeRegistration(127.0.0.1:39227, datanodeUuid=a8cc0fd4-480a-47ac-9b45-4eb618d27036, infoPort=45459, infoSecurePort=0, ipcPort=38767, storageInfo=lv=-57;cid=testClusterID;nsid=1683673958;c=1732148814546), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-21T00:26:57,288 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe23b72eb967f906f with lease ID 0x66a1625cc68c85c4: Processing first storage report for DS-6d0779f9-ed17-4de9-9b7f-d922c3a46254 from datanode DatanodeRegistration(127.0.0.1:39227, datanodeUuid=a8cc0fd4-480a-47ac-9b45-4eb618d27036, infoPort=45459, infoSecurePort=0, ipcPort=38767, storageInfo=lv=-57;cid=testClusterID;nsid=1683673958;c=1732148814546)
2024-11-21T00:26:57,288 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe23b72eb967f906f with lease ID 0x66a1625cc68c85c4: from storage DS-6d0779f9-ed17-4de9-9b7f-d922c3a46254 node DatanodeRegistration(127.0.0.1:39227, datanodeUuid=a8cc0fd4-480a-47ac-9b45-4eb618d27036, infoPort=45459, infoSecurePort=0, ipcPort=38767, storageInfo=lv=-57;cid=testClusterID;nsid=1683673958;c=1732148814546), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-21T00:26:57,300 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56
2024-11-21T00:26:57,373 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552/zookeeper_0, clientPort=64241, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-21T00:26:57,383 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=64241
2024-11-21T00:26:57,398 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T00:26:57,402 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T00:26:57,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741825_1001 (size=7)
2024-11-21T00:26:57,737 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f with version=8
2024-11-21T00:26:57,737 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/hbase-staging
2024-11-21T00:26:58,022 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-21T00:26:58,547 INFO [Time-limited test {}] client.ConnectionUtils(129): master/0e7930017ff8:0 server-side Connection retries=45
2024-11-21T00:26:58,617 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-21T00:26:58,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-21T00:26:58,623 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-21T00:26:58,623 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-21T00:26:58,624 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-21T00:26:58,889 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-21T00:26:58,976 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-21T00:26:58,990 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-21T00:26:58,996 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-21T00:26:59,034 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 51272 (auto-detected)
2024-11-21T00:26:59,035 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-21T00:26:59,082 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35089
2024-11-21T00:26:59,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T00:26:59,098 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T00:26:59,124 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35089 connecting to ZooKeeper ensemble=127.0.0.1:64241
2024-11-21T00:26:59,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:350890x0, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-21T00:26:59,272 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35089-0x1015c13103f0000 connected
2024-11-21T00:26:59,432 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-21T00:26:59,440 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-21T00:26:59,445 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-21T00:26:59,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35089
2024-11-21T00:26:59,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35089
2024-11-21T00:26:59,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35089
2024-11-21T00:26:59,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35089
2024-11-21T00:26:59,481 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35089
2024-11-21T00:26:59,490 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f, hbase.cluster.distributed=false
2024-11-21T00:26:59,588 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/0e7930017ff8:0 server-side Connection retries=45
2024-11-21T00:26:59,588 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-21T00:26:59,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-21T00:26:59,591 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-21T00:26:59,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-21T00:26:59,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-21T00:26:59,594 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-21T00:26:59,600 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-21T00:26:59,601 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:37961
2024-11-21T00:26:59,608 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-21T00:26:59,626 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-21T00:26:59,628 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T00:26:59,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T00:26:59,642 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:37961 connecting to ZooKeeper ensemble=127.0.0.1:64241
2024-11-21T00:26:59,655 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379610x0, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-21T00:26:59,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379610x0, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-21T00:26:59,660 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379610x0, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-21T00:26:59,662 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379610x0, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-21T00:26:59,695 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37961-0x1015c13103f0001 connected
2024-11-21T00:26:59,703 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37961
2024-11-21T00:26:59,706 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37961
2024-11-21T00:26:59,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37961
2024-11-21T00:26:59,745 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37961
2024-11-21T00:26:59,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37961
2024-11-21T00:26:59,781 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/0e7930017ff8,35089,1732148818012
2024-11-21T00:26:59,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-21T00:26:59,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-21T00:26:59,799 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0e7930017ff8,35089,1732148818012
2024-11-21T00:26:59,803 DEBUG [M:0;0e7930017ff8:35089 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0e7930017ff8:35089
2024-11-21T00:26:59,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-21T00:26:59,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-21T00:26:59,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T00:26:59,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T00:26:59,865 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-21T00:26:59,866 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0e7930017ff8,35089,1732148818012 from backup master directory
2024-11-21T00:26:59,867 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-21T00:26:59,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-21T00:26:59,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0e7930017ff8,35089,1732148818012
2024-11-21T00:26:59,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-21T00:26:59,882 WARN [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-21T00:26:59,882 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0e7930017ff8,35089,1732148818012
2024-11-21T00:26:59,884 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-21T00:26:59,886 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-21T00:27:00,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741826_1002 (size=42)
2024-11-21T00:27:00,493 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/hbase.id with ID: 44b4c5de-d7ee-45e5-b8cf-0b95c7026002
2024-11-21T00:27:00,587 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T00:27:00,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T00:27:00,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T00:27:00,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741827_1003 (size=196)
2024-11-21T00:27:00,751 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-21T00:27:00,754 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-21T00:27:00,779 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:27:00,785 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-21T00:27:00,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741828_1004 (size=1189)
2024-11-21T00:27:00,910 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store
2024-11-21T00:27:00,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741829_1005 (size=34)
2024-11-21T00:27:01,001 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-21T00:27:01,002 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-21T00:27:01,004 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-21T00:27:01,004 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T00:27:01,005 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T00:27:01,005 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-21T00:27:01,005 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T00:27:01,005 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T00:27:01,005 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-21T00:27:01,011 WARN [master/0e7930017ff8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/.initializing
2024-11-21T00:27:01,011 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/WALs/0e7930017ff8,35089,1732148818012
2024-11-21T00:27:01,031 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-21T00:27:01,065 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0e7930017ff8%2C35089%2C1732148818012, suffix=, logDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/WALs/0e7930017ff8,35089,1732148818012, archiveDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/oldWALs, maxLogs=10
2024-11-21T00:27:01,125 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/WALs/0e7930017ff8,35089,1732148818012/0e7930017ff8%2C35089%2C1732148818012.1732148821080, exclude list is [], retry=0
2024-11-21T00:27:01,169 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39227,DS-a61f25bc-f3dc-47fa-8c68-22e3217fe1b3,DISK]
2024-11-21T00:27:01,173 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-11-21T00:27:01,228 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/WALs/0e7930017ff8,35089,1732148818012/0e7930017ff8%2C35089%2C1732148818012.1732148821080
2024-11-21T00:27:01,232 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45459:45459)]
2024-11-21T00:27:01,235 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-21T00:27:01,235 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-21T00:27:01,244 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-21T00:27:01,250 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-21T00:27:01,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-21T00:27:01,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-21T00:27:01,354 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T00:27:01,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-21T00:27:01,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-21T00:27:01,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-21T00:27:01,365 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T00:27:01,367 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-21T00:27:01,367 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-21T00:27:01,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-21T00:27:01,383 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T00:27:01,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-21T00:27:01,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-21T00:27:01,393 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-21T00:27:01,393 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T00:27:01,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-21T00:27:01,403 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-21T00:27:01,405 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-21T00:27:01,433 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-21T00:27:01,446 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-21T00:27:01,473 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-21T00:27:01,475 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63918071, jitterRate=-0.047546520829200745}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-21T00:27:01,483 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-21T00:27:01,487 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-21T00:27:01,552 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@260f7eb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-21T00:27:01,616 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-11-21T00:27:01,646 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:27:01,647 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:27:01,653 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:27:01,667 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 13 msec 2024-11-21T00:27:01,675 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 7 msec 2024-11-21T00:27:01,675 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:27:01,759 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T00:27:01,778 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-21T00:27:01,786 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-21T00:27:01,791 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:27:01,793 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:27:01,804 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-21T00:27:01,807 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:27:01,833 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:27:01,845 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-21T00:27:01,848 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:27:01,867 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-21T00:27:01,886 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:27:01,900 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-21T00:27:01,921 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=0e7930017ff8,35089,1732148818012, sessionid=0x1015c13103f0000, setting cluster-up flag (Was=false) 2024-11-21T00:27:01,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T00:27:01,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:27:01,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T00:27:01,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:27:01,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:27:01,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:27:02,004 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-21T00:27:02,008 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0e7930017ff8,35089,1732148818012 2024-11-21T00:27:02,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:27:02,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:27:02,061 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-21T00:27:02,067 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0e7930017ff8,35089,1732148818012 2024-11-21T00:27:02,225 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-21T00:27:02,234 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0e7930017ff8:37961 2024-11-21T00:27:02,236 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1008): ClusterId : 44b4c5de-d7ee-45e5-b8cf-0b95c7026002 2024-11-21T00:27:02,234 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-21T00:27:02,241 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:27:02,256 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:27:02,261 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:27:02,262 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:27:02,264 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0e7930017ff8,35089,1732148818012 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:27:02,271 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0e7930017ff8:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:02,272 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0e7930017ff8:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:02,272 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0e7930017ff8:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:02,272 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0e7930017ff8:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:02,272 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0e7930017ff8:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:27:02,273 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,273 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0e7930017ff8:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:27:02,273 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_TABLE_OPERATIONS-master/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,287 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:27:02,291 DEBUG [RS:0;0e7930017ff8:37961 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63106724, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:02,311 DEBUG [RS:0;0e7930017ff8:37961 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c273949, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0e7930017ff8/172.17.0.2:0 2024-11-21T00:27:02,316 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-21T00:27:02,316 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-21T00:27:02,316 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-21T00:27:02,319 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:02,320 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:27:02,321 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(3073): reportForDuty to master=0e7930017ff8,35089,1732148818012 with isa=0e7930017ff8/172.17.0.2:37961, startcode=1732148819586 2024-11-21T00:27:02,325 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:02,326 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:27:02,351 DEBUG [RS:0;0e7930017ff8:37961 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:27:02,379 INFO 
[master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148852379 2024-11-21T00:27:02,381 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:27:02,383 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:27:02,388 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:27:02,388 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:27:02,389 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:27:02,389 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:27:02,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741831_1007 (size=1039) 2024-11-21T00:27:02,395 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:02,397 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-21T00:27:02,398 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f 2024-11-21T00:27:02,423 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:27:02,424 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] 
cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:27:02,425 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:27:02,443 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:27:02,444 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:27:02,445 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45869, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:27:02,454 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35089 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:27:02,462 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0e7930017ff8:0:becomeActiveMaster-HFileCleaner.large.0-1732148822446,5,FailOnTimeoutGroup] 2024-11-21T00:27:02,474 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0e7930017ff8:0:becomeActiveMaster-HFileCleaner.small.0-1732148822462,5,FailOnTimeoutGroup] 2024-11-21T00:27:02,474 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:02,474 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:27:02,476 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:02,476 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-21T00:27:02,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741832_1008 (size=32) 2024-11-21T00:27:02,494 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:02,505 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-11-21T00:27:02,505 WARN [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-21T00:27:02,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:27:02,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:27:02,516 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:02,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:02,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:27:02,522 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:27:02,523 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:27:02,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:02,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:27:02,532 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:27:02,533 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:02,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:02,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740 2024-11-21T00:27:02,540 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740 2024-11-21T00:27:02,558 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-21T00:27:02,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-21T00:27:02,574 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:02,578 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75182217, jitterRate=0.12030233442783356}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:27:02,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-21T00:27:02,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:27:02,588 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-21T00:27:02,589 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-21T00:27:02,589 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:27:02,589 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:27:02,603 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-21T00:27:02,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-21T00:27:02,607 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(3073): reportForDuty to master=0e7930017ff8,35089,1732148818012 with isa=0e7930017ff8/172.17.0.2:37961, startcode=1732148819586 2024-11-21T00:27:02,610 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:02,610 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-21T00:27:02,611 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35089 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:02,614 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35089 {}] master.ServerManager(486): Registering regionserver=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:02,626 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f 2024-11-21T00:27:02,626 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38105 2024-11-21T00:27:02,627 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-21T00:27:02,637 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:27:02,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T00:27:02,651 DEBUG [RS:0;0e7930017ff8:37961 {}] zookeeper.ZKUtil(111): 
regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0e7930017ff8,37961,1732148819586 2024-11-21T00:27:02,651 WARN [RS:0;0e7930017ff8:37961 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:27:02,651 INFO [RS:0;0e7930017ff8:37961 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:02,651 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/WALs/0e7930017ff8,37961,1732148819586 2024-11-21T00:27:02,652 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:27:02,655 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:27:02,656 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0e7930017ff8,37961,1732148819586] 2024-11-21T00:27:02,686 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-21T00:27:02,707 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:27:02,753 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:27:02,759 INFO [RS:0;0e7930017ff8:37961 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:27:02,760 INFO [RS:0;0e7930017ff8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:02,771 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-21T00:27:02,781 INFO [RS:0;0e7930017ff8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-21T00:27:02,782 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,782 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,782 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,782 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,782 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,782 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0e7930017ff8:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:27:02,783 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,783 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,783 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,783 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,783 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0e7930017ff8:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:02,783 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0e7930017ff8:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:27:02,784 DEBUG [RS:0;0e7930017ff8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:27:02,806 WARN [0e7930017ff8:35089 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-11-21T00:27:02,811 INFO [RS:0;0e7930017ff8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:02,812 INFO [RS:0;0e7930017ff8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:02,812 INFO [RS:0;0e7930017ff8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:02,812 INFO [RS:0;0e7930017ff8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-11-21T00:27:02,812 INFO [RS:0;0e7930017ff8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=0e7930017ff8,37961,1732148819586-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:27:02,896 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:27:02,898 INFO [RS:0;0e7930017ff8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=0e7930017ff8,37961,1732148819586-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:02,973 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.Replication(204): 0e7930017ff8,37961,1732148819586 started 2024-11-21T00:27:02,974 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1767): Serving as 0e7930017ff8,37961,1732148819586, RpcServer on 0e7930017ff8/172.17.0.2:37961, sessionid=0x1015c13103f0001 2024-11-21T00:27:02,975 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:27:02,975 DEBUG [RS:0;0e7930017ff8:37961 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:02,975 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0e7930017ff8,37961,1732148819586' 2024-11-21T00:27:02,975 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T00:27:02,976 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T00:27:02,977 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:27:02,977 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:27:02,977 DEBUG [RS:0;0e7930017ff8:37961 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:02,977 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0e7930017ff8,37961,1732148819586' 2024-11-21T00:27:02,977 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T00:27:02,978 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T00:27:02,979 DEBUG [RS:0;0e7930017ff8:37961 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:27:02,980 INFO [RS:0;0e7930017ff8:37961 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:27:02,980 INFO [RS:0;0e7930017ff8:37961 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-21T00:27:03,089 INFO [RS:0;0e7930017ff8:37961 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:27:03,096 INFO [RS:0;0e7930017ff8:37961 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0e7930017ff8%2C37961%2C1732148819586, suffix=, logDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/WALs/0e7930017ff8,37961,1732148819586, archiveDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/oldWALs, maxLogs=32 2024-11-21T00:27:03,125 DEBUG [RS:0;0e7930017ff8:37961 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/WALs/0e7930017ff8,37961,1732148819586/0e7930017ff8%2C37961%2C1732148819586.1732148823100, exclude list is [], retry=0 2024-11-21T00:27:03,133 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39227,DS-a61f25bc-f3dc-47fa-8c68-22e3217fe1b3,DISK] 2024-11-21T00:27:03,163 INFO [RS:0;0e7930017ff8:37961 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/WALs/0e7930017ff8,37961,1732148819586/0e7930017ff8%2C37961%2C1732148819586.1732148823100 2024-11-21T00:27:03,183 DEBUG [RS:0;0e7930017ff8:37961 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45459:45459)] 2024-11-21T00:27:03,308 DEBUG [0e7930017ff8:35089 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:27:03,314 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:03,323 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0e7930017ff8,37961,1732148819586, state=OPENING 2024-11-21T00:27:03,336 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:27:03,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:27:03,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:27:03,346 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T00:27:03,346 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T00:27:03,348 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:27:03,556 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:03,558 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=AdminService, sasl=false 2024-11-21T00:27:03,576 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45306, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:27:03,598 INFO [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-21T00:27:03,598 INFO [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:03,607 INFO [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:27:03,613 INFO [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0e7930017ff8%2C37961%2C1732148819586.meta, suffix=.meta, logDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/WALs/0e7930017ff8,37961,1732148819586, archiveDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/oldWALs, maxLogs=32 2024-11-21T00:27:03,640 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/WALs/0e7930017ff8,37961,1732148819586/0e7930017ff8%2C37961%2C1732148819586.meta.1732148823615.meta, exclude list is [], retry=0 2024-11-21T00:27:03,656 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39227,DS-a61f25bc-f3dc-47fa-8c68-22e3217fe1b3,DISK] 2024-11-21T00:27:03,671 INFO [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/WALs/0e7930017ff8,37961,1732148819586/0e7930017ff8%2C37961%2C1732148819586.meta.1732148823615.meta 2024-11-21T00:27:03,682 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45459:45459)] 2024-11-21T00:27:03,683 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:03,685 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:27:03,805 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:27:03,821 INFO [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-21T00:27:03,827 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:27:03,828 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:03,828 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-21T00:27:03,828 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-21T00:27:03,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:27:03,851 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:27:03,851 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:03,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:03,860 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:27:03,864 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:27:03,865 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:03,867 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:03,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:27:03,874 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:27:03,874 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:03,881 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:03,886 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740 2024-11-21T00:27:03,896 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740 2024-11-21T00:27:03,901 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
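The repeated CompactionConfiguration dumps above (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0) reflect the stock exploring-compaction tuning knobs. A minimal sketch of the corresponding configuration, assuming the usual hbase.hstore.compaction.* property names from the reference guide:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static Configuration compactionConfig() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);              // minFilesToCompact in the log
            conf.setInt("hbase.hstore.compaction.max", 10);             // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);       // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
            return conf;
        }
    }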
2024-11-21T00:27:03,908 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-21T00:27:03,913 INFO [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69515891, jitterRate=0.03586749732494354}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:27:03,915 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-21T00:27:03,935 INFO [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148823549 2024-11-21T00:27:03,959 DEBUG [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:27:03,960 INFO [RS_OPEN_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-21T00:27:03,969 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:03,974 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0e7930017ff8,37961,1732148819586, state=OPEN 2024-11-21T00:27:04,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T00:27:04,009 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T00:27:04,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T00:27:04,013 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T00:27:04,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:27:04,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=0e7930017ff8,37961,1732148819586 in 663 msec 2024-11-21T00:27:04,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:27:04,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.3880 sec 2024-11-21T00:27:04,051 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.8990 sec 2024-11-21T00:27:04,055 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148824054, 
completionTime=-1 2024-11-21T00:27:04,055 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:27:04,055 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-21T00:27:04,111 DEBUG [hconnection-0x14e842c7-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:04,114 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45320, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:04,127 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-21T00:27:04,127 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148884127 2024-11-21T00:27:04,128 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148944128 2024-11-21T00:27:04,128 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 72 msec 2024-11-21T00:27:04,213 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0e7930017ff8,35089,1732148818012-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:04,214 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0e7930017ff8,35089,1732148818012-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:04,214 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0e7930017ff8,35089,1732148818012-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:04,220 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0e7930017ff8:35089, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:04,225 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:04,252 DEBUG [master/0e7930017ff8:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-21T00:27:04,253 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-21T00:27:04,256 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:27:04,273 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-21T00:27:04,281 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:27:04,283 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:04,289 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:27:04,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741835_1011 (size=358) 2024-11-21T00:27:04,338 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3572e63abe2094af1c626c3e96fc06ec, NAME => 'hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f 2024-11-21T00:27:04,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741836_1012 (size=42) 2024-11-21T00:27:04,467 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:04,467 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 3572e63abe2094af1c626c3e96fc06ec, disabling compactions & flushes 2024-11-21T00:27:04,468 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 2024-11-21T00:27:04,468 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 2024-11-21T00:27:04,468 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 
after waiting 0 ms 2024-11-21T00:27:04,468 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 2024-11-21T00:27:04,468 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 2024-11-21T00:27:04,468 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3572e63abe2094af1c626c3e96fc06ec: 2024-11-21T00:27:04,492 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:27:04,550 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732148824494"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148824494"}]},"ts":"1732148824494"} 2024-11-21T00:27:04,674 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-21T00:27:04,678 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:27:04,682 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148824678"}]},"ts":"1732148824678"} 2024-11-21T00:27:04,689 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-21T00:27:04,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3572e63abe2094af1c626c3e96fc06ec, ASSIGN}] 2024-11-21T00:27:04,727 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3572e63abe2094af1c626c3e96fc06ec, ASSIGN 2024-11-21T00:27:04,735 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=3572e63abe2094af1c626c3e96fc06ec, ASSIGN; state=OFFLINE, location=0e7930017ff8,37961,1732148819586; forceNewPlan=false, retain=false 2024-11-21T00:27:04,886 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3572e63abe2094af1c626c3e96fc06ec, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:04,894 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 3572e63abe2094af1c626c3e96fc06ec, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:27:05,055 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:05,079 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 2024-11-21T00:27:05,080 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 3572e63abe2094af1c626c3e96fc06ec, NAME => 'hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:05,081 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:27:05,081 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:05,081 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:27:05,081 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:27:05,095 INFO [StoreOpener-3572e63abe2094af1c626c3e96fc06ec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:27:05,104 INFO [StoreOpener-3572e63abe2094af1c626c3e96fc06ec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3572e63abe2094af1c626c3e96fc06ec columnFamilyName info 2024-11-21T00:27:05,105 DEBUG [StoreOpener-3572e63abe2094af1c626c3e96fc06ec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:05,108 INFO [StoreOpener-3572e63abe2094af1c626c3e96fc06ec-1 {}] regionserver.HStore(327): Store=3572e63abe2094af1c626c3e96fc06ec/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:05,111 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/namespace/3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:27:05,115 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/namespace/3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:27:05,122 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:27:05,145 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/namespace/3572e63abe2094af1c626c3e96fc06ec/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:05,147 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 3572e63abe2094af1c626c3e96fc06ec; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59876510, jitterRate=-0.10777047276496887}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T00:27:05,148 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 3572e63abe2094af1c626c3e96fc06ec: 2024-11-21T00:27:05,151 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec., pid=6, masterSystemTime=1732148825055 2024-11-21T00:27:05,157 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 2024-11-21T00:27:05,159 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 
2024-11-21T00:27:05,160 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3572e63abe2094af1c626c3e96fc06ec, regionState=OPEN, openSeqNum=2, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:05,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:27:05,179 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 3572e63abe2094af1c626c3e96fc06ec, server=0e7930017ff8,37961,1732148819586 in 273 msec 2024-11-21T00:27:05,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:27:05,191 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=3572e63abe2094af1c626c3e96fc06ec, ASSIGN in 463 msec 2024-11-21T00:27:05,193 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:27:05,194 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148825193"}]},"ts":"1732148825193"} 2024-11-21T00:27:05,199 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-21T00:27:05,212 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:27:05,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 955 msec 2024-11-21T00:27:05,279 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-21T00:27:05,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:27:05,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-21T00:27:05,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:27:05,362 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-21T00:27:05,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-21T00:27:05,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, 
quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-21T00:27:05,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 73 msec 2024-11-21T00:27:05,451 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-21T00:27:05,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-21T00:27:05,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 49 msec 2024-11-21T00:27:05,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-21T00:27:05,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-21T00:27:05,570 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.688sec 2024-11-21T00:27:05,571 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:27:05,573 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:27:05,574 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:27:05,574 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-21T00:27:05,574 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:27:05,576 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0e7930017ff8,35089,1732148818012-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:27:05,576 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0e7930017ff8,35089,1732148818012-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:27:05,601 DEBUG [master/0e7930017ff8:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:27:05,603 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:27:05,604 INFO [master/0e7930017ff8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0e7930017ff8,35089,1732148818012-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
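The two CreateNamespaceProcedure runs above are the master bootstrapping its built-in default and hbase namespaces. A user-defined namespace goes through the same procedure when created through the Admin API; a small sketch (the namespace name example_ns is hypothetical):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // A user namespace is created through the same CreateNamespaceProcedure
                // that the master used for "default" and "hbase" above.
                admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
            }
        }
    }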
2024-11-21T00:27:05,685 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560d619d to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@393cd74f 2024-11-21T00:27:05,686 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-21T00:27:05,747 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18ad730f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:05,752 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-21T00:27:05,753 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-21T00:27:05,779 DEBUG [hconnection-0x5646c6ef-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:05,802 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:05,818 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=0e7930017ff8,35089,1732148818012 2024-11-21T00:27:05,853 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=542, ProcessCount=11, AvailableMemoryMB=3934 2024-11-21T00:27:05,869 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:05,873 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:05,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
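The WARN about ZKConnectionRegistry above points at the RPC-based connection registry described in the linked book section. A hedged sketch of what the client-side switch could look like; the property names hbase.client.registry.impl and hbase.client.bootstrap.servers, and the master-host:16000 endpoint, are assumptions taken from that documentation rather than from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RegistrySketch {
        public static Connection connectWithoutZk() throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property name: route client bootstrap through the RPC registry
            // instead of the deprecated ZooKeeper-based one.
            conf.set("hbase.client.registry.impl",
                     "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
            // Hypothetical endpoint list the RPC registry would contact.
            conf.set("hbase.client.bootstrap.servers", "master-host:16000");
            return ConnectionFactory.createConnection(conf);
        }
    }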
2024-11-21T00:27:05,899 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:05,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-21T00:27:05,906 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:27:05,907 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:05,909 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-21T00:27:05,910 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:27:05,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-21T00:27:05,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741837_1013 (size=963) 2024-11-21T00:27:05,956 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f 2024-11-21T00:27:06,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741838_1014 (size=53) 2024-11-21T00:27:06,001 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:06,002 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6103bc2a66018bd699c0a8ab668a67b7, disabling compactions & flushes 2024-11-21T00:27:06,002 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:06,002 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:06,002 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. after waiting 0 ms 2024-11-21T00:27:06,002 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:06,002 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:06,003 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:06,005 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:27:06,005 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732148826005"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148826005"}]},"ts":"1732148826005"} 2024-11-21T00:27:06,012 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
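The create shown above builds TestAcidGuarantees with three column families A, B and C (one version each, 64 KB blocks) and the table-level metadata key hbase.hregion.compacting.memstore.type set to ADAPTIVE. A minimal sketch of an equivalent client-side create using the standard 2.x Admin and descriptor builders; attributes not set here fall back to the defaults visible in the descriptor dump:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Table-level metadata key from the descriptor dump above.
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
                for (String family : new String[] {"A", "B", "C"}) {
                    table.setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)        // VERSIONS => '1'
                        .setBlocksize(64 * 1024)  // BLOCKSIZE => 64 KB
                        .build());
                }
                admin.createTable(table.build());
            }
        }
    }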
2024-11-21T00:27:06,015 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:27:06,016 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148826015"}]},"ts":"1732148826015"} 2024-11-21T00:27:06,020 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-21T00:27:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-21T00:27:06,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6103bc2a66018bd699c0a8ab668a67b7, ASSIGN}] 2024-11-21T00:27:06,043 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6103bc2a66018bd699c0a8ab668a67b7, ASSIGN 2024-11-21T00:27:06,046 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6103bc2a66018bd699c0a8ab668a67b7, ASSIGN; state=OFFLINE, location=0e7930017ff8,37961,1732148819586; forceNewPlan=false, retain=false 2024-11-21T00:27:06,197 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6103bc2a66018bd699c0a8ab668a67b7, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:06,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:27:06,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-21T00:27:06,355 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:06,370 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:06,371 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:06,371 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:06,371 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:06,371 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:06,372 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:06,391 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:06,396 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:27:06,397 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6103bc2a66018bd699c0a8ab668a67b7 columnFamilyName A 2024-11-21T00:27:06,397 DEBUG [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:06,404 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] regionserver.HStore(327): Store=6103bc2a66018bd699c0a8ab668a67b7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:06,404 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:06,408 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:27:06,409 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6103bc2a66018bd699c0a8ab668a67b7 columnFamilyName B 2024-11-21T00:27:06,410 DEBUG [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:06,412 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] regionserver.HStore(327): Store=6103bc2a66018bd699c0a8ab668a67b7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:06,412 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:06,419 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:27:06,420 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6103bc2a66018bd699c0a8ab668a67b7 columnFamilyName C 2024-11-21T00:27:06,420 DEBUG [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:06,424 INFO [StoreOpener-6103bc2a66018bd699c0a8ab668a67b7-1 {}] regionserver.HStore(327): Store=6103bc2a66018bd699c0a8ab668a67b7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:06,427 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:06,429 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:06,430 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:06,451 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T00:27:06,459 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:06,476 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:06,478 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 6103bc2a66018bd699c0a8ab668a67b7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67162721, jitterRate=8.025318384170532E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:27:06,480 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:06,482 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., pid=11, masterSystemTime=1732148826354 2024-11-21T00:27:06,487 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:06,487 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
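The stores A, B and C above open with memstore type=CompactingMemStore and compactor=ADAPTIVE, which is what the table-level metadata key requested. The same policy can also be asked for per column family; a sketch assuming the MemoryCompactionPolicy enum and ColumnFamilyDescriptorBuilder.setInMemoryCompaction available in HBase 2.x:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionSketch {
        // Build a column family whose memstore uses the ADAPTIVE in-memory compaction
        // policy, matching the CompactingMemStore/compactor=ADAPTIVE lines above.
        public static ColumnFamilyDescriptor adaptiveFamily(String name) {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
        }
    }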
2024-11-21T00:27:06,488 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6103bc2a66018bd699c0a8ab668a67b7, regionState=OPEN, openSeqNum=2, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:06,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-21T00:27:06,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 in 292 msec 2024-11-21T00:27:06,510 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:27:06,510 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6103bc2a66018bd699c0a8ab668a67b7, ASSIGN in 464 msec 2024-11-21T00:27:06,515 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:27:06,516 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148826515"}]},"ts":"1732148826515"} 2024-11-21T00:27:06,520 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-21T00:27:06,532 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:27:06,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 633 msec 2024-11-21T00:27:06,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-21T00:27:06,541 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-21T00:27:06,551 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x30640414 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36ea98cb 2024-11-21T00:27:06,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@395287ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:06,576 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:06,579 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45332, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:06,583 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:06,593 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35270, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:06,615 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x536a4a58 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b3a6cb4 2024-11-21T00:27:06,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@774bf929, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:06,640 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3888ad7c to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@305f2915 2024-11-21T00:27:06,652 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54af89df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:06,654 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2dd0bbda to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@dd77b4a 2024-11-21T00:27:06,670 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@251efa5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:06,673 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18f2a76d to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ec99212 2024-11-21T00:27:06,688 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67f7d3d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:06,691 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x435176b2 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37577c9f 2024-11-21T00:27:06,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e6758ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:06,707 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4e957ecd to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37950159 2024-11-21T00:27:06,718 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ba01639, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:06,720 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22daddc4 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d5a9f0f 2024-11-21T00:27:06,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6704743, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:06,743 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x39028e20 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d4c9c1c 2024-11-21T00:27:06,771 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51cab508, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:06,773 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x624dc5e5 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3bb819cc 2024-11-21T00:27:06,785 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fde8946, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:06,793 DEBUG [hconnection-0x76a5e6ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:06,793 DEBUG [hconnection-0x593641ee-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:06,800 DEBUG [hconnection-0x1edc7dcf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:06,808 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:06,816 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45350, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:06,820 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:06,824 DEBUG [hconnection-0x74f48c42-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:06,827 DEBUG [hconnection-0x38bda5c0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:06,836 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45360, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-11-21T00:27:06,838 DEBUG [hconnection-0x1b171e57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:06,844 DEBUG [hconnection-0x3a4b465a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:06,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-21T00:27:06,851 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:06,865 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:06,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-21T00:27:06,868 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:06,871 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45368, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:06,878 DEBUG [hconnection-0x7187c99f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:06,881 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:06,906 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:06,933 DEBUG [hconnection-0x53bc7585-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:06,936 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45394, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:06,938 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45404, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:06,969 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45414, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:06,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-21T00:27:07,032 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,036 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-21T00:27:07,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:07,056 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-21T00:27:07,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:07,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:07,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:07,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:07,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:07,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:07,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:07,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:07,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-21T00:27:07,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/29b9d65a1e614797bbecf0b7586a81cf is 50, key is test_row_0/A:col10/1732148827055/Put/seqid=0 2024-11-21T00:27:07,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148887333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148887360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148887383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148887406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45404 deadline: 1732148887412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741839_1015 (size=12001) 2024-11-21T00:27:07,473 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/29b9d65a1e614797bbecf0b7586a81cf 2024-11-21T00:27:07,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-21T00:27:07,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148887534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148887554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148887539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148887561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45404 deadline: 1732148887562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/a293ae71a18b4445a684fce1d3bd24aa is 50, key is test_row_0/B:col10/1732148827055/Put/seqid=0 2024-11-21T00:27:07,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148887753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148887781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148887788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148887791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:07,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45404 deadline: 1732148887813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:07,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741840_1016 (size=12001) 2024-11-21T00:27:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-21T00:27:08,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148888064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:08,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:08,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148888091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:08,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148888100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:08,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148888103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:08,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45404 deadline: 1732148888123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:08,221 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/a293ae71a18b4445a684fce1d3bd24aa 2024-11-21T00:27:08,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/c75edd7dbc3e47ce9b5f9c9857617e59 is 50, key is test_row_0/C:col10/1732148827055/Put/seqid=0 2024-11-21T00:27:08,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741841_1017 (size=12001) 2024-11-21T00:27:08,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:08,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148888593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:08,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:08,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148888606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:08,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:08,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148888613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:08,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:08,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148888620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:08,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:08,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45404 deadline: 1732148888635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:08,696 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-21T00:27:08,699 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:27:08,701 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-21T00:27:08,788 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/c75edd7dbc3e47ce9b5f9c9857617e59 2024-11-21T00:27:08,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/29b9d65a1e614797bbecf0b7586a81cf as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/29b9d65a1e614797bbecf0b7586a81cf 2024-11-21T00:27:08,850 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/29b9d65a1e614797bbecf0b7586a81cf, entries=150, sequenceid=13, filesize=11.7 K 
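The repeated RegionTooBusyException entries above show RPC handlers rejecting Mutate calls while the region's memstore is over its 512.0 K blocking limit and a flush is still in flight. A minimal client-side sketch of absorbing such rejections, assuming the exception reaches the caller; the retry budget and backoff values are illustrative, not taken from this test:

// Hedged sketch: retry a Put with backoff when the region reports it is too busy.
// Table, row, family and qualifier names match this log; retry/backoff values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);                 // may be rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (++attempts > 5) throw e;    // illustrative retry budget
          Thread.sleep(100L * attempts);  // simple linear backoff before retrying
        }
      }
    }
  }
}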
2024-11-21T00:27:08,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/a293ae71a18b4445a684fce1d3bd24aa as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a293ae71a18b4445a684fce1d3bd24aa 2024-11-21T00:27:08,885 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a293ae71a18b4445a684fce1d3bd24aa, entries=150, sequenceid=13, filesize=11.7 K 2024-11-21T00:27:08,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/c75edd7dbc3e47ce9b5f9c9857617e59 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c75edd7dbc3e47ce9b5f9c9857617e59 2024-11-21T00:27:08,932 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c75edd7dbc3e47ce9b5f9c9857617e59, entries=150, sequenceid=13, filesize=11.7 K 2024-11-21T00:27:08,935 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6103bc2a66018bd699c0a8ab668a67b7 in 1879ms, sequenceid=13, compaction requested=false 2024-11-21T00:27:08,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:08,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
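The flush commits above persist the pending edits of all three column families (A, B and C) under a single sequence id, which is the row-level atomicity this test exercises. A hedged sketch of how that guarantee can be observed by a reader; the row, family and qualifier names come from the log, while the value-equality check is only an illustration, not the test's actual verification code:

// Hedged sketch of the invariant behind TestAcidGuarantees: a row written atomically
// across families A, B and C should never show mixed values to a single Get.
import java.util.Arrays;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowConsistencyCheck {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Result r = table.get(new Get(Bytes.toBytes("test_row_0")));
      byte[] a = r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
      byte[] b = r.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      byte[] c = r.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
      // A single Get should observe one atomic version of the row across all families.
      if (!Arrays.equals(a, b) || !Arrays.equals(b, c)) {
        throw new AssertionError("row test_row_0 read with mixed values across families");
      }
    }
  }
}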
2024-11-21T00:27:08,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-21T00:27:08,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-21T00:27:08,949 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-21T00:27:08,949 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0770 sec 2024-11-21T00:27:08,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.1280 sec 2024-11-21T00:27:08,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-21T00:27:08,985 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-21T00:27:08,991 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:09,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-21T00:27:09,005 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:09,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-21T00:27:09,007 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:09,007 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:09,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-21T00:27:09,161 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-21T00:27:09,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
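Above, the master finishes FlushRegionProcedure pid=13 and its parent FlushTableProcedure pid=12, and the test client immediately requests another flush of TestAcidGuarantees (pid=14), then polls MasterRpcServices until the procedure is done. On the client side such a request reduces to a single Admin call; this sketch assumes default connection settings:

// Hedged sketch: issuing the table flush that the log shows the master executing as
// FlushTableProcedure/FlushRegionProcedure. The client-side future completes once the
// procedure finishes, matching the "Operation: FLUSH ... completed" entry above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}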
2024-11-21T00:27:09,171 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-21T00:27:09,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:09,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:09,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:09,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:09,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:09,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:09,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/0a4fd3cf85154978b31f3dab9988cfde is 50, key is test_row_0/A:col10/1732148827283/Put/seqid=0 2024-11-21T00:27:09,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-21T00:27:09,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741842_1018 (size=12001) 2024-11-21T00:27:09,352 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-21T00:27:09,550 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T00:27:09,551 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T00:27:09,553 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-21T00:27:09,553 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-21T00:27:09,554 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 
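The 512.0 K figure in the "Over memstore limit" rejections is the per-region blocking threshold, i.e. the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. A sketch of a configuration that would produce such a small limit; the concrete values are assumptions for illustration and were not read from this test's setup:

// Hedged sketch: shrinking the per-region memstore blocking limit. With these assumed
// values the region blocks writers once its memstore exceeds 128 KB * 4 = 512 KB,
// matching the "Over memstore limit=512.0 K" messages in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimitConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // flush at 128 KB (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writers at 4x the flush size
    return conf;
  }
}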
2024-11-21T00:27:09,554 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T00:27:09,555 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T00:27:09,555 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-21T00:27:09,556 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-21T00:27:09,556 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-21T00:27:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-21T00:27:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:09,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:09,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148889652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45404 deadline: 1732148889654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148889656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148889663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148889655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,724 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/0a4fd3cf85154978b31f3dab9988cfde 2024-11-21T00:27:09,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/63a71c57fe4a42909caa1bf0e1f49ba8 is 50, key is test_row_0/B:col10/1732148827283/Put/seqid=0 2024-11-21T00:27:09,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148889768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148889772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148889782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148889775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741843_1019 (size=12001) 2024-11-21T00:27:09,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148889980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148889989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:09,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148889989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:10,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:10,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148889996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:10,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-21T00:27:10,221 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/63a71c57fe4a42909caa1bf0e1f49ba8 2024-11-21T00:27:10,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/3bf800e133ca423ebbf0e5be8e73016d is 50, key is test_row_0/C:col10/1732148827283/Put/seqid=0 2024-11-21T00:27:10,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741844_1020 (size=12001) 2024-11-21T00:27:10,303 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/3bf800e133ca423ebbf0e5be8e73016d 2024-11-21T00:27:10,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:10,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148890300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:10,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:10,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148890300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:10,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:10,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148890307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:10,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:10,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148890314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:10,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/0a4fd3cf85154978b31f3dab9988cfde as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0a4fd3cf85154978b31f3dab9988cfde 2024-11-21T00:27:10,332 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0a4fd3cf85154978b31f3dab9988cfde, entries=150, sequenceid=37, filesize=11.7 K 2024-11-21T00:27:10,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/63a71c57fe4a42909caa1bf0e1f49ba8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63a71c57fe4a42909caa1bf0e1f49ba8 2024-11-21T00:27:10,361 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63a71c57fe4a42909caa1bf0e1f49ba8, entries=150, sequenceid=37, filesize=11.7 K 2024-11-21T00:27:10,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/3bf800e133ca423ebbf0e5be8e73016d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3bf800e133ca423ebbf0e5be8e73016d 2024-11-21T00:27:10,381 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3bf800e133ca423ebbf0e5be8e73016d, entries=150, sequenceid=37, filesize=11.7 K 2024-11-21T00:27:10,384 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 6103bc2a66018bd699c0a8ab668a67b7 in 1212ms, sequenceid=37, compaction requested=false 2024-11-21T00:27:10,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:10,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:10,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-21T00:27:10,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-21T00:27:10,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-21T00:27:10,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3820 sec 2024-11-21T00:27:10,403 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.4070 sec 2024-11-21T00:27:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:10,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-21T00:27:10,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:10,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:10,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:10,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:10,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:10,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:10,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/5d4cbf95fa2b4877bdaf260f62a64353 is 50, key is test_row_0/A:col10/1732148829660/Put/seqid=0 2024-11-21T00:27:10,907 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741845_1021 (size=12001) 2024-11-21T00:27:10,912 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/5d4cbf95fa2b4877bdaf260f62a64353 2024-11-21T00:27:10,949 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/0fc7298451a74887b9799cf230d354db is 50, key is test_row_0/B:col10/1732148829660/Put/seqid=0 2024-11-21T00:27:10,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:10,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148890963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:10,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:10,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148890970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:10,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:10,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148890970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:10,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:10,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148890975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741846_1022 (size=12001) 2024-11-21T00:27:11,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/0fc7298451a74887b9799cf230d354db 2024-11-21T00:27:11,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0837b515c0e64fa291a4b0b47d37d646 is 50, key is test_row_0/C:col10/1732148829660/Put/seqid=0 2024-11-21T00:27:11,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148891082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148891087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148891087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148891088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741847_1023 (size=12001) 2024-11-21T00:27:11,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0837b515c0e64fa291a4b0b47d37d646 2024-11-21T00:27:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-21T00:27:11,119 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-21T00:27:11,121 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:11,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-21T00:27:11,128 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:11,130 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:11,130 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:11,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-21T00:27:11,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/5d4cbf95fa2b4877bdaf260f62a64353 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/5d4cbf95fa2b4877bdaf260f62a64353 2024-11-21T00:27:11,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/5d4cbf95fa2b4877bdaf260f62a64353, entries=150, sequenceid=50, filesize=11.7 K 2024-11-21T00:27:11,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/0fc7298451a74887b9799cf230d354db as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/0fc7298451a74887b9799cf230d354db 2024-11-21T00:27:11,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/0fc7298451a74887b9799cf230d354db, entries=150, sequenceid=50, filesize=11.7 K 2024-11-21T00:27:11,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0837b515c0e64fa291a4b0b47d37d646 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0837b515c0e64fa291a4b0b47d37d646 2024-11-21T00:27:11,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0837b515c0e64fa291a4b0b47d37d646, entries=150, sequenceid=50, filesize=11.7 K 2024-11-21T00:27:11,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6103bc2a66018bd699c0a8ab668a67b7 in 380ms, sequenceid=50, compaction requested=true 2024-11-21T00:27:11,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:11,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:11,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:11,218 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:11,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:11,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:11,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:11,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:27:11,218 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:11,223 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:11,224 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/B is initiating minor compaction (all files) 2024-11-21T00:27:11,225 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/B in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:11,225 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a293ae71a18b4445a684fce1d3bd24aa, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63a71c57fe4a42909caa1bf0e1f49ba8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/0fc7298451a74887b9799cf230d354db] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=35.2 K 2024-11-21T00:27:11,227 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting a293ae71a18b4445a684fce1d3bd24aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732148826974 2024-11-21T00:27:11,229 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 63a71c57fe4a42909caa1bf0e1f49ba8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732148827283 2024-11-21T00:27:11,231 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fc7298451a74887b9799cf230d354db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732148829649 2024-11-21T00:27:11,233 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:11,233 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/A is initiating minor compaction (all files) 2024-11-21T00:27:11,233 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/A in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:11,234 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/29b9d65a1e614797bbecf0b7586a81cf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0a4fd3cf85154978b31f3dab9988cfde, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/5d4cbf95fa2b4877bdaf260f62a64353] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=35.2 K 2024-11-21T00:27:11,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-21T00:27:11,235 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29b9d65a1e614797bbecf0b7586a81cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732148826974 2024-11-21T00:27:11,236 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a4fd3cf85154978b31f3dab9988cfde, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732148827283 2024-11-21T00:27:11,238 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d4cbf95fa2b4877bdaf260f62a64353, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732148829649 2024-11-21T00:27:11,285 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-21T00:27:11,295 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#A#compaction#9 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:11,296 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/586d1d8ac3454c5da0ac92ed02be9a91 is 50, key is test_row_0/A:col10/1732148829660/Put/seqid=0 2024-11-21T00:27:11,296 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#B#compaction#10 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:11,297 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/c42f11b7b7134e979f91047056422401 is 50, key is test_row_0/B:col10/1732148829660/Put/seqid=0 2024-11-21T00:27:11,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:11,300 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:27:11,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:11,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:11,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:11,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:11,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:11,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:11,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741849_1025 (size=12104) 2024-11-21T00:27:11,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/4ecc2bff61354e779cb75c305a2c2bec is 50, key is test_row_0/A:col10/1732148830916/Put/seqid=0 2024-11-21T00:27:11,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741848_1024 (size=12104) 2024-11-21T00:27:11,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
as already flushing 2024-11-21T00:27:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:11,378 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/c42f11b7b7134e979f91047056422401 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c42f11b7b7134e979f91047056422401 2024-11-21T00:27:11,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741850_1026 (size=12001) 2024-11-21T00:27:11,385 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/4ecc2bff61354e779cb75c305a2c2bec 2024-11-21T00:27:11,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/33144f25f2334fc5a907cdd8e0ae516a is 50, key is test_row_0/B:col10/1732148830916/Put/seqid=0 2024-11-21T00:27:11,435 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/B of 6103bc2a66018bd699c0a8ab668a67b7 into c42f11b7b7134e979f91047056422401(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:11,435 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:11,437 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/B, priority=13, startTime=1732148831218; duration=0sec 2024-11-21T00:27:11,438 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:11,438 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-21T00:27:11,453 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:11,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148891393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148891397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,460 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:11,460 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/C is initiating minor compaction (all files) 2024-11-21T00:27:11,460 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/C in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:11,461 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c75edd7dbc3e47ce9b5f9c9857617e59, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3bf800e133ca423ebbf0e5be8e73016d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0837b515c0e64fa291a4b0b47d37d646] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=35.2 K 2024-11-21T00:27:11,464 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c75edd7dbc3e47ce9b5f9c9857617e59, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732148826974 2024-11-21T00:27:11,468 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bf800e133ca423ebbf0e5be8e73016d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732148827283 2024-11-21T00:27:11,469 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0837b515c0e64fa291a4b0b47d37d646, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732148829649 2024-11-21T00:27:11,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148891435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148891457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741851_1027 (size=12001) 2024-11-21T00:27:11,541 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#C#compaction#13 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:11,542 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/4ae801f8a06d4e299af3a1671656df68 is 50, key is test_row_0/C:col10/1732148829660/Put/seqid=0 2024-11-21T00:27:11,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741852_1028 (size=12104) 2024-11-21T00:27:11,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148891563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148891564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148891583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148891588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,615 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/4ae801f8a06d4e299af3a1671656df68 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/4ae801f8a06d4e299af3a1671656df68 2024-11-21T00:27:11,634 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/C of 6103bc2a66018bd699c0a8ab668a67b7 into 4ae801f8a06d4e299af3a1671656df68(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:11,634 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:11,634 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/C, priority=13, startTime=1732148831218; duration=0sec 2024-11-21T00:27:11,634 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:11,634 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:11,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45404 deadline: 1732148891672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,686 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4275 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:11,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-21T00:27:11,789 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/586d1d8ac3454c5da0ac92ed02be9a91 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/586d1d8ac3454c5da0ac92ed02be9a91 2024-11-21T00:27:11,808 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/A of 6103bc2a66018bd699c0a8ab668a67b7 into 586d1d8ac3454c5da0ac92ed02be9a91(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:11,808 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:11,808 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/A, priority=13, startTime=1732148831212; duration=0sec 2024-11-21T00:27:11,809 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:11,809 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:11,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148891808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148891812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148891816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148891813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:11,911 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/33144f25f2334fc5a907cdd8e0ae516a 2024-11-21T00:27:11,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/6bea144f2f344503b337a8e577fc56cc is 50, key is test_row_0/C:col10/1732148830916/Put/seqid=0 2024-11-21T00:27:12,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741853_1029 (size=12001) 2024-11-21T00:27:12,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148892124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148892124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148892138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148892138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-21T00:27:12,418 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/6bea144f2f344503b337a8e577fc56cc 2024-11-21T00:27:12,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/4ecc2bff61354e779cb75c305a2c2bec as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4ecc2bff61354e779cb75c305a2c2bec 2024-11-21T00:27:12,462 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4ecc2bff61354e779cb75c305a2c2bec, entries=150, sequenceid=73, filesize=11.7 K 2024-11-21T00:27:12,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/33144f25f2334fc5a907cdd8e0ae516a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/33144f25f2334fc5a907cdd8e0ae516a 2024-11-21T00:27:12,492 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/33144f25f2334fc5a907cdd8e0ae516a, entries=150, sequenceid=73, filesize=11.7 K 2024-11-21T00:27:12,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/6bea144f2f344503b337a8e577fc56cc as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6bea144f2f344503b337a8e577fc56cc 2024-11-21T00:27:12,518 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6bea144f2f344503b337a8e577fc56cc, entries=150, sequenceid=73, filesize=11.7 K 2024-11-21T00:27:12,525 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6103bc2a66018bd699c0a8ab668a67b7 in 1223ms, sequenceid=73, compaction requested=false 2024-11-21T00:27:12,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:12,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:12,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-21T00:27:12,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-21T00:27:12,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-21T00:27:12,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3980 sec 2024-11-21T00:27:12,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.4150 sec 2024-11-21T00:27:12,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:12,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-21T00:27:12,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:12,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:12,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:12,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:12,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:12,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:12,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/75e2f1f2fd6d49809045ba0a14cd9d92 is 50, key is test_row_0/A:col10/1732148832662/Put/seqid=0 2024-11-21T00:27:12,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741854_1030 (size=12001) 2024-11-21T00:27:12,742 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/75e2f1f2fd6d49809045ba0a14cd9d92 2024-11-21T00:27:12,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/f77a1265657141d28b084ff7f67f27c0 is 50, key is test_row_0/B:col10/1732148832662/Put/seqid=0 2024-11-21T00:27:12,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148892788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148892794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148892800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148892807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741855_1031 (size=12001) 2024-11-21T00:27:12,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148892922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148892932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148892933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:12,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148892933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148893143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148893150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148893155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148893158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-21T00:27:13,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/f77a1265657141d28b084ff7f67f27c0 2024-11-21T00:27:13,269 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-21T00:27:13,278 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-21T00:27:13,283 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:13,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-21T00:27:13,295 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:13,296 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:13,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/b33ff699c02449809ff276a8f42c9c34 is 50, key is test_row_0/C:col10/1732148832662/Put/seqid=0 2024-11-21T00:27:13,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741856_1032 (size=12001) 2024-11-21T00:27:13,392 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/b33ff699c02449809ff276a8f42c9c34 2024-11-21T00:27:13,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-21T00:27:13,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/75e2f1f2fd6d49809045ba0a14cd9d92 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/75e2f1f2fd6d49809045ba0a14cd9d92 2024-11-21T00:27:13,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/75e2f1f2fd6d49809045ba0a14cd9d92, entries=150, sequenceid=90, filesize=11.7 K 2024-11-21T00:27:13,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/f77a1265657141d28b084ff7f67f27c0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/f77a1265657141d28b084ff7f67f27c0 2024-11-21T00:27:13,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/f77a1265657141d28b084ff7f67f27c0, entries=150, sequenceid=90, filesize=11.7 K 2024-11-21T00:27:13,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/b33ff699c02449809ff276a8f42c9c34 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b33ff699c02449809ff276a8f42c9c34 2024-11-21T00:27:13,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148893456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,466 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148893456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:13,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:13,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:13,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:13,467 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:13,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:13,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b33ff699c02449809ff276a8f42c9c34, entries=150, sequenceid=90, filesize=11.7 K 2024-11-21T00:27:13,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6103bc2a66018bd699c0a8ab668a67b7 in 806ms, sequenceid=90, compaction requested=true 2024-11-21T00:27:13,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:13,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:13,471 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:13,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:13,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:13,471 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:13,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:13,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:13,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:13,473 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:13,473 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/B is initiating minor compaction (all files) 2024-11-21T00:27:13,473 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/B in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:13,473 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:13,473 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/A is initiating minor compaction (all files) 2024-11-21T00:27:13,473 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c42f11b7b7134e979f91047056422401, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/33144f25f2334fc5a907cdd8e0ae516a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/f77a1265657141d28b084ff7f67f27c0] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=35.3 K 2024-11-21T00:27:13,473 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/A in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:13,474 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/586d1d8ac3454c5da0ac92ed02be9a91, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4ecc2bff61354e779cb75c305a2c2bec, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/75e2f1f2fd6d49809045ba0a14cd9d92] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=35.3 K 2024-11-21T00:27:13,474 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c42f11b7b7134e979f91047056422401, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732148829649 2024-11-21T00:27:13,475 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 586d1d8ac3454c5da0ac92ed02be9a91, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732148829649 2024-11-21T00:27:13,475 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 33144f25f2334fc5a907cdd8e0ae516a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732148830916 2024-11-21T00:27:13,476 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ecc2bff61354e779cb75c305a2c2bec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732148830916 2024-11-21T00:27:13,476 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f77a1265657141d28b084ff7f67f27c0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732148831431 2024-11-21T00:27:13,478 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75e2f1f2fd6d49809045ba0a14cd9d92, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732148831431 2024-11-21T00:27:13,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:13,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:13,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:27:13,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:13,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:13,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:13,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:13,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:13,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:13,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/c559fafb727d402388f1f9da50e92006 is 50, key is test_row_0/A:col10/1732148833483/Put/seqid=0 2024-11-21T00:27:13,521 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#A#compaction#19 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:13,522 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/fd2213efdd3f434283281c336a25b93e is 50, key is test_row_0/A:col10/1732148832662/Put/seqid=0 2024-11-21T00:27:13,585 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#B#compaction#20 average throughput is 0.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:13,586 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/ebd0ec4f809c489fb6e8e7de88f674ab is 50, key is test_row_0/B:col10/1732148832662/Put/seqid=0 2024-11-21T00:27:13,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-21T00:27:13,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741858_1034 (size=12207) 2024-11-21T00:27:13,634 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741857_1033 (size=14341) 2024-11-21T00:27:13,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:13,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:13,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:13,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:13,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:13,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:13,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:13,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148893643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741859_1035 (size=12207) 2024-11-21T00:27:13,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148893652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,700 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/ebd0ec4f809c489fb6e8e7de88f674ab as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ebd0ec4f809c489fb6e8e7de88f674ab 2024-11-21T00:27:13,716 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/B of 6103bc2a66018bd699c0a8ab668a67b7 into ebd0ec4f809c489fb6e8e7de88f674ab(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:13,717 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:13,717 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/B, priority=13, startTime=1732148833471; duration=0sec 2024-11-21T00:27:13,717 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:13,717 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:13,717 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:13,720 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:13,720 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/C is initiating minor compaction (all files) 2024-11-21T00:27:13,720 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/C in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:13,721 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/4ae801f8a06d4e299af3a1671656df68, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6bea144f2f344503b337a8e577fc56cc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b33ff699c02449809ff276a8f42c9c34] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=35.3 K 2024-11-21T00:27:13,722 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ae801f8a06d4e299af3a1671656df68, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732148829649 2024-11-21T00:27:13,723 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bea144f2f344503b337a8e577fc56cc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732148830916 2024-11-21T00:27:13,724 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b33ff699c02449809ff276a8f42c9c34, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732148831431 2024-11-21T00:27:13,761 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
6103bc2a66018bd699c0a8ab668a67b7#C#compaction#21 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:13,762 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/f330e8b6f035409aa4a3484cc2903bf1 is 50, key is test_row_0/C:col10/1732148832662/Put/seqid=0 2024-11-21T00:27:13,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148893769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148893791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,798 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:13,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:13,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:13,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:13,801 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:13,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:13,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:13,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741860_1036 (size=12207) 2024-11-21T00:27:13,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-21T00:27:13,968 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:13,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:13,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:13,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:13,969 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:13,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:13,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:13,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148893967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:13,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:13,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148893973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:14,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148893998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:14,016 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/fd2213efdd3f434283281c336a25b93e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/fd2213efdd3f434283281c336a25b93e 2024-11-21T00:27:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148894005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,034 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/A of 6103bc2a66018bd699c0a8ab668a67b7 into fd2213efdd3f434283281c336a25b93e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
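The RegionTooBusyException records above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (reported here as 512.0 K); callers are expected to back off and retry while the flush drains the memstore. A minimal client-side sketch of that behavior, assuming a standard HBase Java client and reusing the table, row, and column-family names visible in the log; the retry count and backoff values are illustrative assumptions, and note that the stock client already retries such failures internally (depending on configuration the exception may surface wrapped in a retries-exhausted exception), so the explicit loop is only for demonstration:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);   // same RPC path as RSRpcServices.mutate in the traces above
          break;            // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit; wait for the in-progress flush to drain it.
          if (attempt >= 10) {
            throw new IOException("Region still busy after " + attempt + " attempts", e);
          }
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);  // simple exponential backoff (illustrative)
        }
      }
    }
  }
}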
2024-11-21T00:27:14,034 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:14,034 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/A, priority=13, startTime=1732148833471; duration=0sec 2024-11-21T00:27:14,034 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:14,035 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:14,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/c559fafb727d402388f1f9da50e92006 2024-11-21T00:27:14,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5d483b9b53914776aa41d55ad79bec9d is 50, key is test_row_0/B:col10/1732148833483/Put/seqid=0 2024-11-21T00:27:14,125 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:14,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:14,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741861_1037 (size=12001) 2024-11-21T00:27:14,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5d483b9b53914776aa41d55ad79bec9d 2024-11-21T00:27:14,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/ccd790e1199b484aac8222340212e615 is 50, key is test_row_0/C:col10/1732148833483/Put/seqid=0 2024-11-21T00:27:14,228 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/f330e8b6f035409aa4a3484cc2903bf1 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f330e8b6f035409aa4a3484cc2903bf1 2024-11-21T00:27:14,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741862_1038 (size=12001) 2024-11-21T00:27:14,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/ccd790e1199b484aac8222340212e615 2024-11-21T00:27:14,247 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/c559fafb727d402388f1f9da50e92006 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c559fafb727d402388f1f9da50e92006 2024-11-21T00:27:14,257 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/C of 6103bc2a66018bd699c0a8ab668a67b7 into f330e8b6f035409aa4a3484cc2903bf1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:14,257 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:14,257 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/C, priority=13, startTime=1732148833471; duration=0sec 2024-11-21T00:27:14,257 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:14,257 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:14,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c559fafb727d402388f1f9da50e92006, entries=200, sequenceid=114, filesize=14.0 K 2024-11-21T00:27:14,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5d483b9b53914776aa41d55ad79bec9d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5d483b9b53914776aa41d55ad79bec9d 2024-11-21T00:27:14,279 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5d483b9b53914776aa41d55ad79bec9d, entries=150, sequenceid=114, filesize=11.7 K 2024-11-21T00:27:14,279 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:14,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:14,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:14,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:14,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/ccd790e1199b484aac8222340212e615 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ccd790e1199b484aac8222340212e615 2024-11-21T00:27:14,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:14,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ccd790e1199b484aac8222340212e615, entries=150, sequenceid=114, filesize=11.7 K 2024-11-21T00:27:14,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 6103bc2a66018bd699c0a8ab668a67b7 in 812ms, sequenceid=114, compaction requested=false 2024-11-21T00:27:14,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:14,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:14,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-21T00:27:14,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:14,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:14,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:14,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:14,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:14,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:14,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/1849cfef9e9542b888cfc0d1f5b434ca is 50, key is test_row_0/A:col10/1732148833569/Put/seqid=0 2024-11-21T00:27:14,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741863_1039 (size=14391) 2024-11-21T00:27:14,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/1849cfef9e9542b888cfc0d1f5b434ca 2024-11-21T00:27:14,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-21T00:27:14,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/63c3f2deff3b45ecae792f19d0126226 is 50, key is test_row_0/B:col10/1732148833569/Put/seqid=0 2024-11-21T00:27:14,434 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 
2024-11-21T00:27:14,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:14,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:14,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
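The repeating pid=19 pattern in these records is the master's flush procedure: it dispatches a FlushRegionCallable to the region server, the server declines because a MemStoreFlusher flush is already running ("NOT flushing ... as already flushing"), reports the IOException back via reportProcedureDone, and the master redispatches until the flush can proceed. The same kind of flush can be requested from client code through the Admin API; a minimal sketch, assuming a standard HBase Java client and using the table name from the log (how the flush is executed internally, direct RPC versus procedure, depends on the HBase version):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the cluster to flush the memstores of every region of the table;
      // if a region is already flushing, the request is retried/skipped server-side.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}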
2024-11-21T00:27:14,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:14,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148894456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741864_1040 (size=12051) 2024-11-21T00:27:14,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:14,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148894462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:14,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148894572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,589 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:14,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:14,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,591 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:14,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148894578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
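The "Over memstore limit=512.0 K" figure in these warnings is the per-region blocking threshold: writes are rejected once a region's memstore reaches hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A sketch of a test-style configuration that would produce a 512 K blocking limit; the two property names are the standard ones, but the specific values shown are an assumption chosen to match the number in the log, not necessarily what this test sets:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush each region once its memstore reaches 128 KB...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // ...and start rejecting writes (RegionTooBusyException) at 4x that, i.e. 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit (bytes): " + blockingLimit);  // 524288 = 512.0 K
  }
}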
2024-11-21T00:27:14,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,745 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:14,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
as already flushing 2024-11-21T00:27:14,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,746 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:14,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148894782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:14,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148894796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/63c3f2deff3b45ecae792f19d0126226 2024-11-21T00:27:14,904 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:14,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:14,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:14,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:14,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/542a308a694440f2aa2648e664e44372 is 50, key is test_row_0/C:col10/1732148833569/Put/seqid=0 2024-11-21T00:27:14,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741865_1041 (size=12051) 2024-11-21T00:27:14,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/542a308a694440f2aa2648e664e44372 2024-11-21T00:27:14,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:14,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148894978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:14,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/1849cfef9e9542b888cfc0d1f5b434ca as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1849cfef9e9542b888cfc0d1f5b434ca 2024-11-21T00:27:15,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148894988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1849cfef9e9542b888cfc0d1f5b434ca, entries=200, sequenceid=130, filesize=14.1 K 2024-11-21T00:27:15,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/63c3f2deff3b45ecae792f19d0126226 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63c3f2deff3b45ecae792f19d0126226 2024-11-21T00:27:15,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63c3f2deff3b45ecae792f19d0126226, entries=150, sequenceid=130, filesize=11.8 K 2024-11-21T00:27:15,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/542a308a694440f2aa2648e664e44372 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/542a308a694440f2aa2648e664e44372 2024-11-21T00:27:15,033 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/542a308a694440f2aa2648e664e44372, entries=150, sequenceid=130, filesize=11.8 K 2024-11-21T00:27:15,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6103bc2a66018bd699c0a8ab668a67b7 in 712ms, sequenceid=130, compaction requested=true 2024-11-21T00:27:15,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:15,035 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:15,037 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:15,037 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40939 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:15,038 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/A is initiating minor compaction (all files) 2024-11-21T00:27:15,038 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/A in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:15,038 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/fd2213efdd3f434283281c336a25b93e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c559fafb727d402388f1f9da50e92006, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1849cfef9e9542b888cfc0d1f5b434ca] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=40.0 K 2024-11-21T00:27:15,039 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting fd2213efdd3f434283281c336a25b93e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732148831431 2024-11-21T00:27:15,040 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c559fafb727d402388f1f9da50e92006, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732148832716 2024-11-21T00:27:15,041 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1849cfef9e9542b888cfc0d1f5b434ca, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732148833569 2024-11-21T00:27:15,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:15,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:15,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:15,041 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:15,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction 
store size is 3 2024-11-21T00:27:15,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:15,043 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:15,044 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/B is initiating minor compaction (all files) 2024-11-21T00:27:15,044 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/B in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:15,044 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ebd0ec4f809c489fb6e8e7de88f674ab, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5d483b9b53914776aa41d55ad79bec9d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63c3f2deff3b45ecae792f19d0126226] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=35.4 K 2024-11-21T00:27:15,045 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebd0ec4f809c489fb6e8e7de88f674ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732148831431 2024-11-21T00:27:15,046 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d483b9b53914776aa41d55ad79bec9d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732148832716 2024-11-21T00:27:15,048 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63c3f2deff3b45ecae792f19d0126226, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732148833569 2024-11-21T00:27:15,091 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-21T00:27:15,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:15,100 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:27:15,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:15,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:15,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:15,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:15,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:15,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:15,115 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#A#compaction#27 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:15,116 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/596e47d4435f4c94b3215c98ec1adfeb is 50, key is test_row_0/A:col10/1732148833569/Put/seqid=0 2024-11-21T00:27:15,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:15,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:15,137 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#B#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:15,139 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/6cfeeedb15d04364a5facf93e9350793 is 50, key is test_row_0/B:col10/1732148833569/Put/seqid=0 2024-11-21T00:27:15,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/e957432f6b97469a963d5033235f5cb0 is 50, key is test_row_0/A:col10/1732148834451/Put/seqid=0 2024-11-21T00:27:15,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741866_1042 (size=12359) 2024-11-21T00:27:15,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148895185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148895210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741867_1043 (size=12359) 2024-11-21T00:27:15,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741868_1044 (size=12151) 2024-11-21T00:27:15,269 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/e957432f6b97469a963d5033235f5cb0 2024-11-21T00:27:15,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5138891cb78f4d9bac21cc3a015f8962 is 50, key is test_row_0/B:col10/1732148834451/Put/seqid=0 2024-11-21T00:27:15,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148895318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148895334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741869_1045 (size=12151) 2024-11-21T00:27:15,347 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5138891cb78f4d9bac21cc3a015f8962 2024-11-21T00:27:15,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/984fd39907744b2d8822d60d1b94c43e is 50, key is test_row_0/C:col10/1732148834451/Put/seqid=0 2024-11-21T00:27:15,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-21T00:27:15,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741870_1046 (size=12151) 2024-11-21T00:27:15,425 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/984fd39907744b2d8822d60d1b94c43e 2024-11-21T00:27:15,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/e957432f6b97469a963d5033235f5cb0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e957432f6b97469a963d5033235f5cb0 2024-11-21T00:27:15,447 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e957432f6b97469a963d5033235f5cb0, entries=150, sequenceid=153, filesize=11.9 K 2024-11-21T00:27:15,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5138891cb78f4d9bac21cc3a015f8962 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5138891cb78f4d9bac21cc3a015f8962 2024-11-21T00:27:15,459 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5138891cb78f4d9bac21cc3a015f8962, entries=150, sequenceid=153, filesize=11.9 K 2024-11-21T00:27:15,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/984fd39907744b2d8822d60d1b94c43e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/984fd39907744b2d8822d60d1b94c43e 2024-11-21T00:27:15,472 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/984fd39907744b2d8822d60d1b94c43e, entries=150, sequenceid=153, filesize=11.9 K 2024-11-21T00:27:15,474 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6103bc2a66018bd699c0a8ab668a67b7 in 374ms, sequenceid=153, compaction requested=true 2024-11-21T00:27:15,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:15,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:15,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-21T00:27:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-21T00:27:15,481 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-21T00:27:15,483 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1810 sec 2024-11-21T00:27:15,487 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.2040 sec 2024-11-21T00:27:15,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-21T00:27:15,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:15,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:15,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:15,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:15,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:15,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:15,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:15,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/b5118c8b4d2144e3b18051cea0651d08 is 50, key is test_row_0/A:col10/1732148835208/Put/seqid=0 2024-11-21T00:27:15,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741871_1047 (size=14541) 2024-11-21T00:27:15,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/b5118c8b4d2144e3b18051cea0651d08 2024-11-21T00:27:15,626 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/596e47d4435f4c94b3215c98ec1adfeb as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/596e47d4435f4c94b3215c98ec1adfeb 2024-11-21T00:27:15,644 
INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/A of 6103bc2a66018bd699c0a8ab668a67b7 into 596e47d4435f4c94b3215c98ec1adfeb(size=12.1 K), total size for store is 23.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:15,644 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:15,644 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/A, priority=13, startTime=1732148835035; duration=0sec 2024-11-21T00:27:15,644 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:15,644 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:15,644 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:15,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/fac2656ceb0d4ffdbab6ddf2a283a702 is 50, key is test_row_0/B:col10/1732148835208/Put/seqid=0 2024-11-21T00:27:15,651 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:15,652 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/C is initiating minor compaction (all files) 2024-11-21T00:27:15,652 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/C in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:15,652 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f330e8b6f035409aa4a3484cc2903bf1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ccd790e1199b484aac8222340212e615, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/542a308a694440f2aa2648e664e44372, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/984fd39907744b2d8822d60d1b94c43e] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=47.3 K 2024-11-21T00:27:15,655 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f330e8b6f035409aa4a3484cc2903bf1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732148831431 2024-11-21T00:27:15,656 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ccd790e1199b484aac8222340212e615, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732148832716 2024-11-21T00:27:15,657 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 542a308a694440f2aa2648e664e44372, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732148833569 2024-11-21T00:27:15,657 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 984fd39907744b2d8822d60d1b94c43e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732148834438 2024-11-21T00:27:15,659 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/6cfeeedb15d04364a5facf93e9350793 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/6cfeeedb15d04364a5facf93e9350793 2024-11-21T00:27:15,671 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/B of 6103bc2a66018bd699c0a8ab668a67b7 into 6cfeeedb15d04364a5facf93e9350793(size=12.1 K), total size for store is 23.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:15,673 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:15,673 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/B, priority=13, startTime=1732148835041; duration=0sec 2024-11-21T00:27:15,673 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:15,673 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:15,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148895670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148895676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741872_1048 (size=12151) 2024-11-21T00:27:15,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45404 deadline: 1732148895699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,711 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8301 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:15,717 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#C#compaction#34 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:15,718 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/3fa7683d74db4663b040a5ebe0d94837 is 50, key is test_row_0/C:col10/1732148834451/Put/seqid=0 2024-11-21T00:27:15,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741873_1049 (size=12493) 2024-11-21T00:27:15,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148895779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148895788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:15,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:15,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148895991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:16,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:16,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148896003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:16,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/fac2656ceb0d4ffdbab6ddf2a283a702 2024-11-21T00:27:16,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/b785fc8496d54b7dbeec8d7ac70c8156 is 50, key is test_row_0/C:col10/1732148835208/Put/seqid=0 2024-11-21T00:27:16,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741874_1050 (size=12151) 2024-11-21T00:27:16,196 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/3fa7683d74db4663b040a5ebe0d94837 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3fa7683d74db4663b040a5ebe0d94837 2024-11-21T00:27:16,207 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/C of 6103bc2a66018bd699c0a8ab668a67b7 into 3fa7683d74db4663b040a5ebe0d94837(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:16,207 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:16,208 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/C, priority=12, startTime=1732148835041; duration=0sec 2024-11-21T00:27:16,208 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:16,208 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:16,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:16,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148896298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:16,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:16,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148896326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:16,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/b785fc8496d54b7dbeec8d7ac70c8156 2024-11-21T00:27:16,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/b5118c8b4d2144e3b18051cea0651d08 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b5118c8b4d2144e3b18051cea0651d08 2024-11-21T00:27:16,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b5118c8b4d2144e3b18051cea0651d08, entries=200, sequenceid=167, filesize=14.2 K 2024-11-21T00:27:16,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/fac2656ceb0d4ffdbab6ddf2a283a702 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/fac2656ceb0d4ffdbab6ddf2a283a702 2024-11-21T00:27:16,620 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/fac2656ceb0d4ffdbab6ddf2a283a702, entries=150, sequenceid=167, filesize=11.9 K 2024-11-21T00:27:16,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/b785fc8496d54b7dbeec8d7ac70c8156 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b785fc8496d54b7dbeec8d7ac70c8156 2024-11-21T00:27:16,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b785fc8496d54b7dbeec8d7ac70c8156, entries=150, sequenceid=167, filesize=11.9 K 2024-11-21T00:27:16,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6103bc2a66018bd699c0a8ab668a67b7 in 1100ms, sequenceid=167, compaction requested=true 2024-11-21T00:27:16,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:16,636 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:16,637 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:16,638 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39051 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:16,639 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/A is initiating minor compaction (all files) 2024-11-21T00:27:16,639 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/A in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:16,639 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/596e47d4435f4c94b3215c98ec1adfeb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e957432f6b97469a963d5033235f5cb0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b5118c8b4d2144e3b18051cea0651d08] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=38.1 K 2024-11-21T00:27:16,639 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:16,639 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/B is initiating minor compaction (all files) 2024-11-21T00:27:16,639 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/B in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:16,640 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/6cfeeedb15d04364a5facf93e9350793, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5138891cb78f4d9bac21cc3a015f8962, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/fac2656ceb0d4ffdbab6ddf2a283a702] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=35.8 K 2024-11-21T00:27:16,640 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 596e47d4435f4c94b3215c98ec1adfeb, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732148833569 2024-11-21T00:27:16,640 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 6cfeeedb15d04364a5facf93e9350793, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732148833569 2024-11-21T00:27:16,641 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting e957432f6b97469a963d5033235f5cb0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732148834438 2024-11-21T00:27:16,641 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5138891cb78f4d9bac21cc3a015f8962, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732148834438 2024-11-21T00:27:16,641 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting b5118c8b4d2144e3b18051cea0651d08, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732148835169 2024-11-21T00:27:16,641 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting fac2656ceb0d4ffdbab6ddf2a283a702, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732148835169 2024-11-21T00:27:16,668 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#A#compaction#36 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:16,669 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/b24d16631e69417db90d38b0d57e7242 is 50, key is test_row_0/A:col10/1732148835208/Put/seqid=0 2024-11-21T00:27:16,682 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#B#compaction#37 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:16,682 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/81d70bb99c15415ab8009a8ac26f0cde is 50, key is test_row_0/B:col10/1732148835208/Put/seqid=0 2024-11-21T00:27:16,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741875_1051 (size=12561) 2024-11-21T00:27:16,746 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/b24d16631e69417db90d38b0d57e7242 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b24d16631e69417db90d38b0d57e7242 2024-11-21T00:27:16,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741876_1052 (size=12561) 2024-11-21T00:27:16,767 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/A of 6103bc2a66018bd699c0a8ab668a67b7 into b24d16631e69417db90d38b0d57e7242(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:16,768 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:16,768 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/A, priority=13, startTime=1732148836636; duration=0sec 2024-11-21T00:27:16,768 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:16,768 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:16,768 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-21T00:27:16,770 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:27:16,770 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:27:16,770 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. because compaction request was cancelled 2024-11-21T00:27:16,770 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:16,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:16,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:27:16,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:16,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:16,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:16,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:16,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:16,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:16,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/ff0734378c4242f0a5c19979e612bc6e is 50, key is 
test_row_0/A:col10/1732148835602/Put/seqid=0 2024-11-21T00:27:16,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:16,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741877_1053 (size=14541) 2024-11-21T00:27:16,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148896893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:16,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:16,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148896895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:16,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:17,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148896998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:17,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:17,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148897008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:17,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:17,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148897008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:17,015 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4227 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:17,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:17,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148897023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:17,035 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4229 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:17,180 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/81d70bb99c15415ab8009a8ac26f0cde as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/81d70bb99c15415ab8009a8ac26f0cde 2024-11-21T00:27:17,196 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/B of 6103bc2a66018bd699c0a8ab668a67b7 into 81d70bb99c15415ab8009a8ac26f0cde(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:17,196 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:17,196 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/B, priority=13, startTime=1732148836637; duration=0sec 2024-11-21T00:27:17,197 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:17,197 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:17,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:17,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148897202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:17,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:17,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148897221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:17,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/ff0734378c4242f0a5c19979e612bc6e 2024-11-21T00:27:17,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/23b3d87afe1a4807a0211d77427de2df is 50, key is test_row_0/B:col10/1732148835602/Put/seqid=0 2024-11-21T00:27:17,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741878_1054 (size=12151) 2024-11-21T00:27:17,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/23b3d87afe1a4807a0211d77427de2df 2024-11-21T00:27:17,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-21T00:27:17,407 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-21T00:27:17,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:17,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-21T00:27:17,420 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:17,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-21T00:27:17,422 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:17,422 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:17,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/9340bd109ebc4623996114de60b4bd4e is 50, key is test_row_0/C:col10/1732148835602/Put/seqid=0 2024-11-21T00:27:17,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741879_1055 (size=12151) 2024-11-21T00:27:17,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/9340bd109ebc4623996114de60b4bd4e 2024-11-21T00:27:17,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/ff0734378c4242f0a5c19979e612bc6e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/ff0734378c4242f0a5c19979e612bc6e 2024-11-21T00:27:17,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:17,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148897508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:17,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/ff0734378c4242f0a5c19979e612bc6e, entries=200, sequenceid=194, filesize=14.2 K 2024-11-21T00:27:17,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-21T00:27:17,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/23b3d87afe1a4807a0211d77427de2df as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/23b3d87afe1a4807a0211d77427de2df 2024-11-21T00:27:17,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/23b3d87afe1a4807a0211d77427de2df, entries=150, sequenceid=194, filesize=11.9 K 2024-11-21T00:27:17,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/9340bd109ebc4623996114de60b4bd4e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/9340bd109ebc4623996114de60b4bd4e 2024-11-21T00:27:17,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:17,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148897529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:17,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/9340bd109ebc4623996114de60b4bd4e, entries=150, sequenceid=194, filesize=11.9 K 2024-11-21T00:27:17,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 6103bc2a66018bd699c0a8ab668a67b7 in 732ms, sequenceid=194, compaction requested=true 2024-11-21T00:27:17,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:17,555 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-21T00:27:17,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:17,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:17,556 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-21T00:27:17,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:17,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:17,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:17,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:17,556 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:27:17,556 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:27:17,557 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. because compaction request was cancelled 2024-11-21T00:27:17,557 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:27:17,557 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:17,557 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:27:17,557 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. because compaction request was cancelled 2024-11-21T00:27:17,557 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:17,557 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:17,559 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:17,560 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/C is initiating minor compaction (all files) 2024-11-21T00:27:17,560 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/C in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:17,560 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3fa7683d74db4663b040a5ebe0d94837, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b785fc8496d54b7dbeec8d7ac70c8156, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/9340bd109ebc4623996114de60b4bd4e] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=35.9 K 2024-11-21T00:27:17,561 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fa7683d74db4663b040a5ebe0d94837, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732148834438 2024-11-21T00:27:17,561 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting b785fc8496d54b7dbeec8d7ac70c8156, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732148835169 2024-11-21T00:27:17,562 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9340bd109ebc4623996114de60b4bd4e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732148835602 2024-11-21T00:27:17,575 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:17,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-21T00:27:17,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:17,576 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-21T00:27:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:17,593 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#C#compaction#41 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:17,593 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/4284908554ef43e890f662a1ea09e969 is 50, key is test_row_0/C:col10/1732148835602/Put/seqid=0 2024-11-21T00:27:17,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/20e360dbcb93470face4097212a052ef is 50, key is test_row_0/A:col10/1732148836879/Put/seqid=0 2024-11-21T00:27:17,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741881_1057 (size=12151) 2024-11-21T00:27:17,646 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/20e360dbcb93470face4097212a052ef 2024-11-21T00:27:17,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741880_1056 (size=12595) 2024-11-21T00:27:17,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/e8d06f12296a4fb88a8c3a9421c9ec68 is 50, key is test_row_0/B:col10/1732148836879/Put/seqid=0 2024-11-21T00:27:17,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-21T00:27:17,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741882_1058 (size=12151) 2024-11-21T00:27:18,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-21T00:27:18,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:18,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
as already flushing 2024-11-21T00:27:18,092 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/4284908554ef43e890f662a1ea09e969 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/4284908554ef43e890f662a1ea09e969 2024-11-21T00:27:18,111 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/C of 6103bc2a66018bd699c0a8ab668a67b7 into 4284908554ef43e890f662a1ea09e969(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:18,111 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:18,111 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/C, priority=13, startTime=1732148837556; duration=0sec 2024-11-21T00:27:18,111 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:18,111 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:18,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:18,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148898114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:18,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:18,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148898125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:18,140 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/e8d06f12296a4fb88a8c3a9421c9ec68 2024-11-21T00:27:18,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/f09bc1262b5b4c13b8a562d2feedbb33 is 50, key is test_row_0/C:col10/1732148836879/Put/seqid=0 2024-11-21T00:27:18,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741883_1059 (size=12151) 2024-11-21T00:27:18,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:18,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148898234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:18,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:18,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148898238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:18,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:18,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148898439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:18,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:18,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148898443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:18,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-21T00:27:18,591 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/f09bc1262b5b4c13b8a562d2feedbb33 2024-11-21T00:27:18,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/20e360dbcb93470face4097212a052ef as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/20e360dbcb93470face4097212a052ef 2024-11-21T00:27:18,613 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/20e360dbcb93470face4097212a052ef, entries=150, sequenceid=209, filesize=11.9 K 2024-11-21T00:27:18,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/e8d06f12296a4fb88a8c3a9421c9ec68 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/e8d06f12296a4fb88a8c3a9421c9ec68 2024-11-21T00:27:18,625 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/e8d06f12296a4fb88a8c3a9421c9ec68, entries=150, sequenceid=209, filesize=11.9 K 2024-11-21T00:27:18,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/f09bc1262b5b4c13b8a562d2feedbb33 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f09bc1262b5b4c13b8a562d2feedbb33 2024-11-21T00:27:18,638 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f09bc1262b5b4c13b8a562d2feedbb33, entries=150, sequenceid=209, filesize=11.9 K 2024-11-21T00:27:18,640 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6103bc2a66018bd699c0a8ab668a67b7 in 1063ms, sequenceid=209, compaction requested=true 2024-11-21T00:27:18,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:18,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:18,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-21T00:27:18,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-21T00:27:18,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-21T00:27:18,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2220 sec 2024-11-21T00:27:18,650 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.2320 sec 2024-11-21T00:27:18,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:18,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:27:18,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:18,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:18,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:18,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:18,756 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:18,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:18,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/edceee44a132450eaacf544fb9ec348c is 50, key is test_row_0/A:col10/1732148838111/Put/seqid=0 2024-11-21T00:27:18,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:18,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148898775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:18,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:18,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148898778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:18,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741884_1060 (size=12151) 2024-11-21T00:27:18,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:18,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148898883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:18,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:18,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148898883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:19,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:19,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148899088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:19,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:19,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148899089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:19,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/edceee44a132450eaacf544fb9ec348c 2024-11-21T00:27:19,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/310f918b94d6443d96f3977b24a20a7b is 50, key is test_row_0/B:col10/1732148838111/Put/seqid=0 2024-11-21T00:27:19,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741885_1061 (size=12151) 2024-11-21T00:27:19,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/310f918b94d6443d96f3977b24a20a7b 2024-11-21T00:27:19,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/e77664447a68400eb3083869101841c3 is 50, key is test_row_0/C:col10/1732148838111/Put/seqid=0 2024-11-21T00:27:19,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741886_1062 (size=12151) 2024-11-21T00:27:19,290 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/e77664447a68400eb3083869101841c3 2024-11-21T00:27:19,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/edceee44a132450eaacf544fb9ec348c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/edceee44a132450eaacf544fb9ec348c 2024-11-21T00:27:19,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/edceee44a132450eaacf544fb9ec348c, entries=150, sequenceid=233, filesize=11.9 K 2024-11-21T00:27:19,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/310f918b94d6443d96f3977b24a20a7b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/310f918b94d6443d96f3977b24a20a7b 2024-11-21T00:27:19,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/310f918b94d6443d96f3977b24a20a7b, entries=150, sequenceid=233, filesize=11.9 K 2024-11-21T00:27:19,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/e77664447a68400eb3083869101841c3 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e77664447a68400eb3083869101841c3 2024-11-21T00:27:19,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e77664447a68400eb3083869101841c3, entries=150, sequenceid=233, filesize=11.9 K 2024-11-21T00:27:19,342 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-21T00:27:19,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6103bc2a66018bd699c0a8ab668a67b7 in 589ms, sequenceid=233, compaction requested=true 2024-11-21T00:27:19,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:19,344 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:19,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:19,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:19,346 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:19,347 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51404 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:19,347 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/A is initiating minor compaction (all files) 2024-11-21T00:27:19,347 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/A in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:19,347 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b24d16631e69417db90d38b0d57e7242, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/ff0734378c4242f0a5c19979e612bc6e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/20e360dbcb93470face4097212a052ef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/edceee44a132450eaacf544fb9ec348c] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=50.2 K 2024-11-21T00:27:19,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:19,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:19,348 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting b24d16631e69417db90d38b0d57e7242, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732148835169 2024-11-21T00:27:19,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:19,348 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:19,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:19,349 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/B is initiating minor compaction (all files) 2024-11-21T00:27:19,349 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/B in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:19,349 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/81d70bb99c15415ab8009a8ac26f0cde, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/23b3d87afe1a4807a0211d77427de2df, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/e8d06f12296a4fb88a8c3a9421c9ec68, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/310f918b94d6443d96f3977b24a20a7b] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=47.9 K 2024-11-21T00:27:19,349 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 81d70bb99c15415ab8009a8ac26f0cde, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732148835169 2024-11-21T00:27:19,349 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff0734378c4242f0a5c19979e612bc6e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732148835602 2024-11-21T00:27:19,350 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 23b3d87afe1a4807a0211d77427de2df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732148835602 2024-11-21T00:27:19,350 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20e360dbcb93470face4097212a052ef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732148836860 2024-11-21T00:27:19,350 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e8d06f12296a4fb88a8c3a9421c9ec68, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732148836860 2024-11-21T00:27:19,351 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting edceee44a132450eaacf544fb9ec348c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732148838094 2024-11-21T00:27:19,351 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 310f918b94d6443d96f3977b24a20a7b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732148838094 2024-11-21T00:27:19,370 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#B#compaction#48 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:19,371 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/655b508040bd499c8d5948c475b6637f is 50, key is test_row_0/B:col10/1732148838111/Put/seqid=0 2024-11-21T00:27:19,378 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#A#compaction#49 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:19,379 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/4070b4542edb4476a77020b8c4ef83bf is 50, key is test_row_0/A:col10/1732148838111/Put/seqid=0 2024-11-21T00:27:19,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:19,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-21T00:27:19,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:19,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:19,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:19,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:19,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:19,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:19,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741887_1063 (size=12697) 2024-11-21T00:27:19,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741888_1064 (size=12697) 2024-11-21T00:27:19,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/9d32edb1635d4b98a831921507203342 is 50, key is test_row_0/A:col10/1732148839395/Put/seqid=0 2024-11-21T00:27:19,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741889_1065 (size=12151) 2024-11-21T00:27:19,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=247 (bloomFilter=true), 
to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/9d32edb1635d4b98a831921507203342 2024-11-21T00:27:19,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/efc222aa709144c1a5ac24403ee6286b is 50, key is test_row_0/B:col10/1732148839395/Put/seqid=0 2024-11-21T00:27:19,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:19,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148899468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:19,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:19,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148899470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:19,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741890_1066 (size=12151) 2024-11-21T00:27:19,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/efc222aa709144c1a5ac24403ee6286b 2024-11-21T00:27:19,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/ba30f2a9c0754f37b02a09717eddce04 is 50, key is test_row_0/C:col10/1732148839395/Put/seqid=0 2024-11-21T00:27:19,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-21T00:27:19,528 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-21T00:27:19,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:19,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-21T00:27:19,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-21T00:27:19,535 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:19,536 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:19,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-21T00:27:19,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741891_1067 (size=12151) 2024-11-21T00:27:19,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/ba30f2a9c0754f37b02a09717eddce04 2024-11-21T00:27:19,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/9d32edb1635d4b98a831921507203342 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9d32edb1635d4b98a831921507203342 2024-11-21T00:27:19,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9d32edb1635d4b98a831921507203342, entries=150, sequenceid=247, filesize=11.9 K 2024-11-21T00:27:19,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/efc222aa709144c1a5ac24403ee6286b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/efc222aa709144c1a5ac24403ee6286b 2024-11-21T00:27:19,567 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/efc222aa709144c1a5ac24403ee6286b, entries=150, sequenceid=247, filesize=11.9 K 2024-11-21T00:27:19,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/ba30f2a9c0754f37b02a09717eddce04 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ba30f2a9c0754f37b02a09717eddce04 2024-11-21T00:27:19,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:19,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148899574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:19,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:19,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148899574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:19,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ba30f2a9c0754f37b02a09717eddce04, entries=150, sequenceid=247, filesize=11.9 K 2024-11-21T00:27:19,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6103bc2a66018bd699c0a8ab668a67b7 in 187ms, sequenceid=247, compaction requested=true 2024-11-21T00:27:19,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:19,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:19,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:27:19,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:19,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-21T00:27:19,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:19,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-21T00:27:19,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-21T00:27:19,690 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:19,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 
2024-11-21T00:27:19,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:19,691 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-21T00:27:19,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:19,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:19,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:19,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:19,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:19,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:19,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/cd716e7b8e9e44f9ae6a3b875b4915d0 is 50, key is test_row_0/A:col10/1732148839466/Put/seqid=0 2024-11-21T00:27:19,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741892_1068 (size=12301) 2024-11-21T00:27:19,723 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/cd716e7b8e9e44f9ae6a3b875b4915d0 2024-11-21T00:27:19,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/519d08a92c924353adb66931e624a86d is 50, key is test_row_0/B:col10/1732148839466/Put/seqid=0 2024-11-21T00:27:19,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
as already flushing 2024-11-21T00:27:19,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:19,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741893_1069 (size=12301) 2024-11-21T00:27:19,795 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/519d08a92c924353adb66931e624a86d 2024-11-21T00:27:19,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0ef34c46c1d84f829ba7a657545989d2 is 50, key is test_row_0/C:col10/1732148839466/Put/seqid=0 2024-11-21T00:27:19,817 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/655b508040bd499c8d5948c475b6637f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/655b508040bd499c8d5948c475b6637f 2024-11-21T00:27:19,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:19,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148899818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:19,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:19,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148899814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:19,826 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/4070b4542edb4476a77020b8c4ef83bf as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4070b4542edb4476a77020b8c4ef83bf 2024-11-21T00:27:19,832 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/B of 6103bc2a66018bd699c0a8ab668a67b7 into 655b508040bd499c8d5948c475b6637f(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:19,832 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:19,832 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/B, priority=12, startTime=1732148839345; duration=0sec 2024-11-21T00:27:19,832 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-21T00:27:19,832 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:19,832 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 4 compacting, 1 eligible, 16 blocking 2024-11-21T00:27:19,836 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:27:19,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-21T00:27:19,837 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
2024-11-21T00:27:19,837 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. because compaction request was cancelled 2024-11-21T00:27:19,838 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:19,838 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:19,838 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:19,840 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:19,840 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/C is initiating minor compaction (all files) 2024-11-21T00:27:19,840 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/C in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:19,840 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/4284908554ef43e890f662a1ea09e969, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f09bc1262b5b4c13b8a562d2feedbb33, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e77664447a68400eb3083869101841c3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ba30f2a9c0754f37b02a09717eddce04] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=47.9 K 2024-11-21T00:27:19,841 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/A of 6103bc2a66018bd699c0a8ab668a67b7 into 4070b4542edb4476a77020b8c4ef83bf(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:19,841 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:19,841 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/A, priority=12, startTime=1732148839344; duration=0sec 2024-11-21T00:27:19,841 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 4284908554ef43e890f662a1ea09e969, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732148835602 2024-11-21T00:27:19,841 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:27:19,841 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:19,841 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-11-21T00:27:19,841 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:27:19,841 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:27:19,841 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
because compaction request was cancelled 2024-11-21T00:27:19,841 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f09bc1262b5b4c13b8a562d2feedbb33, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732148836860 2024-11-21T00:27:19,841 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:19,842 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-21T00:27:19,842 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e77664447a68400eb3083869101841c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732148838094 2024-11-21T00:27:19,842 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ba30f2a9c0754f37b02a09717eddce04, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732148838772 2024-11-21T00:27:19,842 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:27:19,842 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:27:19,842 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. because compaction request was cancelled 2024-11-21T00:27:19,842 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:19,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741894_1070 (size=12301) 2024-11-21T00:27:19,848 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0ef34c46c1d84f829ba7a657545989d2 2024-11-21T00:27:19,861 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#C#compaction#56 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:19,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/cd716e7b8e9e44f9ae6a3b875b4915d0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/cd716e7b8e9e44f9ae6a3b875b4915d0 2024-11-21T00:27:19,862 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/15e5c95b4b234ac2aaab33c1f298b575 is 50, key is test_row_0/C:col10/1732148839395/Put/seqid=0 2024-11-21T00:27:19,869 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/cd716e7b8e9e44f9ae6a3b875b4915d0, entries=150, sequenceid=269, filesize=12.0 K 2024-11-21T00:27:19,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/519d08a92c924353adb66931e624a86d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/519d08a92c924353adb66931e624a86d 2024-11-21T00:27:19,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741895_1071 (size=12731) 2024-11-21T00:27:19,888 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/519d08a92c924353adb66931e624a86d, entries=150, sequenceid=269, filesize=12.0 K 2024-11-21T00:27:19,891 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/15e5c95b4b234ac2aaab33c1f298b575 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/15e5c95b4b234ac2aaab33c1f298b575 2024-11-21T00:27:19,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0ef34c46c1d84f829ba7a657545989d2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0ef34c46c1d84f829ba7a657545989d2 
2024-11-21T00:27:19,902 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/C of 6103bc2a66018bd699c0a8ab668a67b7 into 15e5c95b4b234ac2aaab33c1f298b575(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:19,902 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:19,903 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/C, priority=12, startTime=1732148839584; duration=0sec 2024-11-21T00:27:19,903 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:19,903 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:19,906 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0ef34c46c1d84f829ba7a657545989d2, entries=150, sequenceid=269, filesize=12.0 K 2024-11-21T00:27:19,908 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 6103bc2a66018bd699c0a8ab668a67b7 in 217ms, sequenceid=269, compaction requested=true 2024-11-21T00:27:19,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:19,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:19,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-21T00:27:19,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-21T00:27:19,913 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-21T00:27:19,913 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 373 msec 2024-11-21T00:27:19,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 384 msec 2024-11-21T00:27:19,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:19,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-21T00:27:19,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:19,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:19,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:19,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:19,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:19,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:19,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/3ac41f1033c948aca3297c3de343fd10 is 50, key is test_row_0/A:col10/1732148839794/Put/seqid=0 2024-11-21T00:27:19,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741896_1072 (size=17181) 2024-11-21T00:27:20,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148900033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148900035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-21T00:27:20,138 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-21T00:27:20,140 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:20,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148900140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-21T00:27:20,143 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:20,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-21T00:27:20,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148900140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,145 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:20,145 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:20,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-21T00:27:20,297 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-21T00:27:20,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:20,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:20,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:20,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:20,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:20,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:20,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148900346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148900347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/3ac41f1033c948aca3297c3de343fd10 2024-11-21T00:27:20,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5834dbeb22134a75ae55a469a33b8d71 is 50, key is test_row_0/B:col10/1732148839794/Put/seqid=0 2024-11-21T00:27:20,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741897_1073 (size=12301) 2024-11-21T00:27:20,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5834dbeb22134a75ae55a469a33b8d71 2024-11-21T00:27:20,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0c1ee5ac46b045ffa11873ba8f946ebf is 50, key is test_row_0/C:col10/1732148839794/Put/seqid=0 2024-11-21T00:27:20,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-21T00:27:20,452 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-21T00:27:20,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:20,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:20,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:20,453 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:20,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:20,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741898_1074 (size=12301) 2024-11-21T00:27:20,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:20,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0c1ee5ac46b045ffa11873ba8f946ebf 2024-11-21T00:27:20,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/3ac41f1033c948aca3297c3de343fd10 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/3ac41f1033c948aca3297c3de343fd10 2024-11-21T00:27:20,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/3ac41f1033c948aca3297c3de343fd10, entries=250, sequenceid=288, filesize=16.8 K 2024-11-21T00:27:20,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5834dbeb22134a75ae55a469a33b8d71 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5834dbeb22134a75ae55a469a33b8d71 2024-11-21T00:27:20,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5834dbeb22134a75ae55a469a33b8d71, entries=150, sequenceid=288, filesize=12.0 K 2024-11-21T00:27:20,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0c1ee5ac46b045ffa11873ba8f946ebf as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0c1ee5ac46b045ffa11873ba8f946ebf 2024-11-21T00:27:20,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0c1ee5ac46b045ffa11873ba8f946ebf, entries=150, sequenceid=288, filesize=12.0 K 2024-11-21T00:27:20,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 6103bc2a66018bd699c0a8ab668a67b7 in 573ms, sequenceid=288, compaction requested=true 2024-11-21T00:27:20,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:20,500 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:20,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-21T00:27:20,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:20,503 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:20,503 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54330 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:20,504 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/A is initiating minor compaction (all files) 2024-11-21T00:27:20,504 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/A in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:20,504 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4070b4542edb4476a77020b8c4ef83bf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9d32edb1635d4b98a831921507203342, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/cd716e7b8e9e44f9ae6a3b875b4915d0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/3ac41f1033c948aca3297c3de343fd10] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=53.1 K 2024-11-21T00:27:20,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:20,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:20,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:20,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:20,505 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4070b4542edb4476a77020b8c4ef83bf, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732148838094 2024-11-21T00:27:20,506 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 
3 in ratio 2024-11-21T00:27:20,506 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/B is initiating minor compaction (all files) 2024-11-21T00:27:20,506 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/B in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:20,506 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/655b508040bd499c8d5948c475b6637f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/efc222aa709144c1a5ac24403ee6286b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/519d08a92c924353adb66931e624a86d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5834dbeb22134a75ae55a469a33b8d71] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=48.3 K 2024-11-21T00:27:20,506 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d32edb1635d4b98a831921507203342, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732148838772 2024-11-21T00:27:20,507 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 655b508040bd499c8d5948c475b6637f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732148838094 2024-11-21T00:27:20,508 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd716e7b8e9e44f9ae6a3b875b4915d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732148839460 2024-11-21T00:27:20,508 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting efc222aa709144c1a5ac24403ee6286b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732148838772 2024-11-21T00:27:20,509 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ac41f1033c948aca3297c3de343fd10, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148839794 2024-11-21T00:27:20,509 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 519d08a92c924353adb66931e624a86d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732148839460 2024-11-21T00:27:20,510 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5834dbeb22134a75ae55a469a33b8d71, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148839794 2024-11-21T00:27:20,523 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#B#compaction#60 average throughput is 3.28 MB/second, slept 0 
time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:20,524 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/120a9d91dec847d1941750cf59693eb4 is 50, key is test_row_0/B:col10/1732148839794/Put/seqid=0 2024-11-21T00:27:20,536 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#A#compaction#61 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:20,536 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/66f3d72a26c642b78ff74d8238e14a63 is 50, key is test_row_0/A:col10/1732148839794/Put/seqid=0 2024-11-21T00:27:20,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741899_1075 (size=12983) 2024-11-21T00:27:20,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741900_1076 (size=12983) 2024-11-21T00:27:20,558 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/120a9d91dec847d1941750cf59693eb4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/120a9d91dec847d1941750cf59693eb4 2024-11-21T00:27:20,570 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/B of 6103bc2a66018bd699c0a8ab668a67b7 into 120a9d91dec847d1941750cf59693eb4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:20,570 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:20,570 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/B, priority=12, startTime=1732148840503; duration=0sec 2024-11-21T00:27:20,570 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:20,571 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:20,571 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:20,572 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:20,572 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/C is initiating minor compaction (all files) 2024-11-21T00:27:20,572 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/C in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:20,573 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/15e5c95b4b234ac2aaab33c1f298b575, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0ef34c46c1d84f829ba7a657545989d2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0c1ee5ac46b045ffa11873ba8f946ebf] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=36.5 K 2024-11-21T00:27:20,575 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 15e5c95b4b234ac2aaab33c1f298b575, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732148838772 2024-11-21T00:27:20,576 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ef34c46c1d84f829ba7a657545989d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732148839460 2024-11-21T00:27:20,577 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c1ee5ac46b045ffa11873ba8f946ebf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148839794 2024-11-21T00:27:20,593 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
6103bc2a66018bd699c0a8ab668a67b7#C#compaction#62 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:20,594 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/ab0a61515da64051a3f9c11a55b05ee8 is 50, key is test_row_0/C:col10/1732148839794/Put/seqid=0 2024-11-21T00:27:20,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741901_1077 (size=12983) 2024-11-21T00:27:20,611 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-21T00:27:20,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:20,612 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-21T00:27:20,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:20,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:20,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:20,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:20,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:20,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:20,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/f28cdcb921c24c009cba0c7166cbd215 is 50, key is test_row_0/A:col10/1732148840033/Put/seqid=0 2024-11-21T00:27:20,632 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/ab0a61515da64051a3f9c11a55b05ee8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ab0a61515da64051a3f9c11a55b05ee8 2024-11-21T00:27:20,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741902_1078 (size=12301) 2024-11-21T00:27:20,657 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/C of 6103bc2a66018bd699c0a8ab668a67b7 into ab0a61515da64051a3f9c11a55b05ee8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:20,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:20,657 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:20,658 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/C, priority=13, startTime=1732148840504; duration=0sec 2024-11-21T00:27:20,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:20,658 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:20,658 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:20,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148900686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148900688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-21T00:27:20,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148900790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148900793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,960 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/66f3d72a26c642b78ff74d8238e14a63 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/66f3d72a26c642b78ff74d8238e14a63 2024-11-21T00:27:20,969 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/A of 6103bc2a66018bd699c0a8ab668a67b7 into 66f3d72a26c642b78ff74d8238e14a63(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:20,970 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:20,970 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/A, priority=12, startTime=1732148840500; duration=0sec 2024-11-21T00:27:20,970 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:20,970 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:20,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148900992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:20,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:20,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148900996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,038 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/f28cdcb921c24c009cba0c7166cbd215 2024-11-21T00:27:21,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:21,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45394 deadline: 1732148901047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,048 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8260 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:21,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/8b06d24e59094e6cb175a4733fbdaf27 is 50, key is test_row_0/B:col10/1732148840033/Put/seqid=0 2024-11-21T00:27:21,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:21,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45344 deadline: 1732148901072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,075 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8268 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:21,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741903_1079 (size=12301) 2024-11-21T00:27:21,083 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/8b06d24e59094e6cb175a4733fbdaf27 2024-11-21T00:27:21,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/e08678c5f03044fb863187bee77b0f09 is 50, key is test_row_0/C:col10/1732148840033/Put/seqid=0 2024-11-21T00:27:21,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741904_1080 (size=12301) 2024-11-21T00:27:21,115 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/e08678c5f03044fb863187bee77b0f09 2024-11-21T00:27:21,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/f28cdcb921c24c009cba0c7166cbd215 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/f28cdcb921c24c009cba0c7166cbd215 2024-11-21T00:27:21,134 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/f28cdcb921c24c009cba0c7166cbd215, entries=150, sequenceid=309, filesize=12.0 K 2024-11-21T00:27:21,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/8b06d24e59094e6cb175a4733fbdaf27 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/8b06d24e59094e6cb175a4733fbdaf27 2024-11-21T00:27:21,144 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/8b06d24e59094e6cb175a4733fbdaf27, entries=150, sequenceid=309, filesize=12.0 K 2024-11-21T00:27:21,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/e08678c5f03044fb863187bee77b0f09 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e08678c5f03044fb863187bee77b0f09 2024-11-21T00:27:21,162 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e08678c5f03044fb863187bee77b0f09, entries=150, sequenceid=309, filesize=12.0 K 2024-11-21T00:27:21,163 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 6103bc2a66018bd699c0a8ab668a67b7 in 551ms, sequenceid=309, compaction requested=false 2024-11-21T00:27:21,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:21,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:21,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-21T00:27:21,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-21T00:27:21,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-21T00:27:21,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0200 sec 2024-11-21T00:27:21,168 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.0270 sec 2024-11-21T00:27:21,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-21T00:27:21,247 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-21T00:27:21,248 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:21,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-21T00:27:21,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-21T00:27:21,250 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:21,251 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:21,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:21,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:21,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-21T00:27:21,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:21,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:21,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:21,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-21T00:27:21,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:21,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:21,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/e8b3e0a752da42348164f89f8a913c42 is 50, key is test_row_0/A:col10/1732148841301/Put/seqid=0 2024-11-21T00:27:21,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741905_1081 (size=12301) 2024-11-21T00:27:21,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-21T00:27:21,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:21,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148901369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:21,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148901370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,403 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-21T00:27:21,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:21,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:21,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:21,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:21,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:21,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:21,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:21,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148901473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:21,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148901473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-21T00:27:21,556 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-21T00:27:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:21,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:21,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:21,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:21,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148901677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:21,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148901677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,710 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-21T00:27:21,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:21,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:21,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:21,711 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:21,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:21,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:21,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/e8b3e0a752da42348164f89f8a913c42 2024-11-21T00:27:21,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/3d2e2ae8216c4f469eca7362e6631bce is 50, key is test_row_0/B:col10/1732148841301/Put/seqid=0 2024-11-21T00:27:21,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741906_1082 (size=12301) 2024-11-21T00:27:21,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-21T00:27:21,863 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-21T00:27:21,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:21,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:21,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:21,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:21,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:21,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:21,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:21,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148901980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:21,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:21,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148901981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:22,016 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:22,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-21T00:27:22,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:22,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:22,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:22,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:22,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:22,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:22,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/3d2e2ae8216c4f469eca7362e6631bce 2024-11-21T00:27:22,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/7407f46aac5943249105e328e3cf4022 is 50, key is test_row_0/C:col10/1732148841301/Put/seqid=0 2024-11-21T00:27:22,170 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:22,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-21T00:27:22,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:22,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:22,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:22,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:22,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:22,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741907_1083 (size=12301) 2024-11-21T00:27:22,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:22,174 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/7407f46aac5943249105e328e3cf4022 2024-11-21T00:27:22,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/e8b3e0a752da42348164f89f8a913c42 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e8b3e0a752da42348164f89f8a913c42 2024-11-21T00:27:22,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e8b3e0a752da42348164f89f8a913c42, entries=150, sequenceid=328, filesize=12.0 K 2024-11-21T00:27:22,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/3d2e2ae8216c4f469eca7362e6631bce as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3d2e2ae8216c4f469eca7362e6631bce 2024-11-21T00:27:22,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3d2e2ae8216c4f469eca7362e6631bce, entries=150, sequenceid=328, filesize=12.0 K 2024-11-21T00:27:22,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/7407f46aac5943249105e328e3cf4022 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/7407f46aac5943249105e328e3cf4022 2024-11-21T00:27:22,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/7407f46aac5943249105e328e3cf4022, entries=150, sequenceid=328, filesize=12.0 K 2024-11-21T00:27:22,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 6103bc2a66018bd699c0a8ab668a67b7 in 907ms, sequenceid=328, compaction requested=true 2024-11-21T00:27:22,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:22,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:22,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:22,209 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:22,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:22,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:22,209 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:22,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:22,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:22,211 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:22,211 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/A is initiating minor compaction (all files) 2024-11-21T00:27:22,211 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:22,212 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/A in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:22,212 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/B is initiating minor compaction (all files) 2024-11-21T00:27:22,212 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/B in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:22,212 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/66f3d72a26c642b78ff74d8238e14a63, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/f28cdcb921c24c009cba0c7166cbd215, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e8b3e0a752da42348164f89f8a913c42] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=36.7 K 2024-11-21T00:27:22,212 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/120a9d91dec847d1941750cf59693eb4, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/8b06d24e59094e6cb175a4733fbdaf27, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3d2e2ae8216c4f469eca7362e6631bce] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=36.7 K 2024-11-21T00:27:22,212 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 66f3d72a26c642b78ff74d8238e14a63, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148839794 2024-11-21T00:27:22,213 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 120a9d91dec847d1941750cf59693eb4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148839794 2024-11-21T00:27:22,214 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f28cdcb921c24c009cba0c7166cbd215, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1732148840027 2024-11-21T00:27:22,214 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b06d24e59094e6cb175a4733fbdaf27, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1732148840027 2024-11-21T00:27:22,215 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e8b3e0a752da42348164f89f8a913c42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732148840679 2024-11-21T00:27:22,215 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d2e2ae8216c4f469eca7362e6631bce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732148840679 2024-11-21T00:27:22,229 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#A#compaction#69 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:22,230 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/190b19b5b613400488457bd6ca7fb558 is 50, key is test_row_0/A:col10/1732148841301/Put/seqid=0 2024-11-21T00:27:22,238 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#B#compaction#70 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:22,239 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/bcadbe01f27c4115a278e325de65a75b is 50, key is test_row_0/B:col10/1732148841301/Put/seqid=0 2024-11-21T00:27:22,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741908_1084 (size=13085) 2024-11-21T00:27:22,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741909_1085 (size=13085) 2024-11-21T00:27:22,323 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:22,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-21T00:27:22,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:22,324 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-21T00:27:22,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:22,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:22,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:22,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:22,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:22,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:22,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/8744a631ccb24d209b755bb98ce11341 is 50, key is test_row_0/A:col10/1732148841362/Put/seqid=0 2024-11-21T00:27:22,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741910_1086 (size=12301) 2024-11-21T00:27:22,349 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/8744a631ccb24d209b755bb98ce11341 2024-11-21T00:27:22,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-21T00:27:22,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5cecd01999d54f1084a628645905abd2 is 50, key is test_row_0/B:col10/1732148841362/Put/seqid=0 2024-11-21T00:27:22,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741911_1087 (size=12301) 2024-11-21T00:27:22,391 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=347 (bloomFilter=true), 
to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5cecd01999d54f1084a628645905abd2 2024-11-21T00:27:22,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/6b3621bfade34708ba52a0db2a21da78 is 50, key is test_row_0/C:col10/1732148841362/Put/seqid=0 2024-11-21T00:27:22,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741912_1088 (size=12301) 2024-11-21T00:27:22,424 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/6b3621bfade34708ba52a0db2a21da78 2024-11-21T00:27:22,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/8744a631ccb24d209b755bb98ce11341 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/8744a631ccb24d209b755bb98ce11341 2024-11-21T00:27:22,441 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/8744a631ccb24d209b755bb98ce11341, entries=150, sequenceid=347, filesize=12.0 K 2024-11-21T00:27:22,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/5cecd01999d54f1084a628645905abd2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5cecd01999d54f1084a628645905abd2 2024-11-21T00:27:22,450 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5cecd01999d54f1084a628645905abd2, entries=150, sequenceid=347, filesize=12.0 K 2024-11-21T00:27:22,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/6b3621bfade34708ba52a0db2a21da78 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6b3621bfade34708ba52a0db2a21da78 2024-11-21T00:27:22,459 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6b3621bfade34708ba52a0db2a21da78, entries=150, sequenceid=347, filesize=12.0 K 2024-11-21T00:27:22,460 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for 6103bc2a66018bd699c0a8ab668a67b7 in 136ms, sequenceid=347, compaction requested=true 2024-11-21T00:27:22,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:22,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:22,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-21T00:27:22,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-21T00:27:22,464 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-21T00:27:22,464 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2110 sec 2024-11-21T00:27:22,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.2160 sec 2024-11-21T00:27:22,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:22,505 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:27:22,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:22,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:22,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:22,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:22,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:22,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:22,518 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/7b8e05d606ab432ebdd3f34120cd1eed is 50, key is test_row_0/A:col10/1732148842497/Put/seqid=0 2024-11-21T00:27:22,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741913_1089 (size=17181) 2024-11-21T00:27:22,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/7b8e05d606ab432ebdd3f34120cd1eed 2024-11-21T00:27:22,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/c276fb07b6ea4e24beef7d94bcf65a0d is 50, key is test_row_0/B:col10/1732148842497/Put/seqid=0 2024-11-21T00:27:22,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:22,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148902586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:22,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:22,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148902588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:22,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741914_1090 (size=12301) 2024-11-21T00:27:22,604 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/c276fb07b6ea4e24beef7d94bcf65a0d 2024-11-21T00:27:22,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/27c87bafdaf84cc69f876251094704e0 is 50, key is test_row_0/C:col10/1732148842497/Put/seqid=0 2024-11-21T00:27:22,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741915_1091 (size=12301) 2024-11-21T00:27:22,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/27c87bafdaf84cc69f876251094704e0 2024-11-21T00:27:22,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/7b8e05d606ab432ebdd3f34120cd1eed as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/7b8e05d606ab432ebdd3f34120cd1eed 2024-11-21T00:27:22,687 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/190b19b5b613400488457bd6ca7fb558 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/190b19b5b613400488457bd6ca7fb558 2024-11-21T00:27:22,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:22,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148902691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:22,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/7b8e05d606ab432ebdd3f34120cd1eed, entries=250, sequenceid=359, filesize=16.8 K 2024-11-21T00:27:22,695 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/bcadbe01f27c4115a278e325de65a75b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/bcadbe01f27c4115a278e325de65a75b 2024-11-21T00:27:22,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/c276fb07b6ea4e24beef7d94bcf65a0d as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c276fb07b6ea4e24beef7d94bcf65a0d 2024-11-21T00:27:22,697 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/A of 6103bc2a66018bd699c0a8ab668a67b7 into 190b19b5b613400488457bd6ca7fb558(size=12.8 K), total size for store is 41.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:22,698 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:22,698 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/A, priority=13, startTime=1732148842209; duration=0sec 2024-11-21T00:27:22,698 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:22,698 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:22,698 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:22,701 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:22,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:22,701 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/C is initiating minor compaction (all files) 2024-11-21T00:27:22,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148902693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:22,701 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/C in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:22,701 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ab0a61515da64051a3f9c11a55b05ee8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e08678c5f03044fb863187bee77b0f09, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/7407f46aac5943249105e328e3cf4022, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6b3621bfade34708ba52a0db2a21da78] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=48.7 K 2024-11-21T00:27:22,703 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ab0a61515da64051a3f9c11a55b05ee8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148839794 2024-11-21T00:27:22,704 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e08678c5f03044fb863187bee77b0f09, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1732148840027 2024-11-21T00:27:22,705 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 7407f46aac5943249105e328e3cf4022, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732148840679 2024-11-21T00:27:22,706 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b3621bfade34708ba52a0db2a21da78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1732148841362 2024-11-21T00:27:22,706 INFO 
[RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/B of 6103bc2a66018bd699c0a8ab668a67b7 into bcadbe01f27c4115a278e325de65a75b(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:22,706 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:22,706 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/B, priority=13, startTime=1732148842209; duration=0sec 2024-11-21T00:27:22,707 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:22,707 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:22,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c276fb07b6ea4e24beef7d94bcf65a0d, entries=150, sequenceid=359, filesize=12.0 K 2024-11-21T00:27:22,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/27c87bafdaf84cc69f876251094704e0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/27c87bafdaf84cc69f876251094704e0 2024-11-21T00:27:22,722 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/27c87bafdaf84cc69f876251094704e0, entries=150, sequenceid=359, filesize=12.0 K 2024-11-21T00:27:22,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6103bc2a66018bd699c0a8ab668a67b7 in 218ms, sequenceid=359, compaction requested=true 2024-11-21T00:27:22,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:22,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:22,724 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:22,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:22,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, 
priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:22,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:22,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:22,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:27:22,725 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42567 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:22,725 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/A is initiating minor compaction (all files) 2024-11-21T00:27:22,725 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/A in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:22,726 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/190b19b5b613400488457bd6ca7fb558, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/8744a631ccb24d209b755bb98ce11341, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/7b8e05d606ab432ebdd3f34120cd1eed] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=41.6 K 2024-11-21T00:27:22,726 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 190b19b5b613400488457bd6ca7fb558, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732148840679 2024-11-21T00:27:22,726 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#C#compaction#77 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:22,727 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8744a631ccb24d209b755bb98ce11341, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1732148841362 2024-11-21T00:27:22,727 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/c9b4babb847743af859b5adecff256ec is 50, key is test_row_0/C:col10/1732148841362/Put/seqid=0 2024-11-21T00:27:22,727 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b8e05d606ab432ebdd3f34120cd1eed, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732148842493 2024-11-21T00:27:22,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741916_1092 (size=13119) 2024-11-21T00:27:22,744 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#A#compaction#78 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:22,745 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/c0b4bd910f134a2f8d886b2b7e4479e0 is 50, key is test_row_0/A:col10/1732148842497/Put/seqid=0 2024-11-21T00:27:22,756 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/c9b4babb847743af859b5adecff256ec as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c9b4babb847743af859b5adecff256ec 2024-11-21T00:27:22,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741917_1093 (size=13187) 2024-11-21T00:27:22,765 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/C of 6103bc2a66018bd699c0a8ab668a67b7 into c9b4babb847743af859b5adecff256ec(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:22,765 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:22,765 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/C, priority=12, startTime=1732148842209; duration=0sec 2024-11-21T00:27:22,765 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:27:22,765 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:22,765 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:22,765 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:22,767 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:22,767 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/B is initiating minor compaction (all files) 2024-11-21T00:27:22,768 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/B in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:22,768 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/bcadbe01f27c4115a278e325de65a75b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5cecd01999d54f1084a628645905abd2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c276fb07b6ea4e24beef7d94bcf65a0d] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=36.8 K 2024-11-21T00:27:22,771 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting bcadbe01f27c4115a278e325de65a75b, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732148840679 2024-11-21T00:27:22,771 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5cecd01999d54f1084a628645905abd2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1732148841362 2024-11-21T00:27:22,772 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/c0b4bd910f134a2f8d886b2b7e4479e0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c0b4bd910f134a2f8d886b2b7e4479e0 2024-11-21T00:27:22,772 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c276fb07b6ea4e24beef7d94bcf65a0d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732148842497 2024-11-21T00:27:22,782 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/A of 6103bc2a66018bd699c0a8ab668a67b7 into c0b4bd910f134a2f8d886b2b7e4479e0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:22,782 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:22,782 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/A, priority=13, startTime=1732148842724; duration=0sec 2024-11-21T00:27:22,782 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:22,782 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:22,783 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-21T00:27:22,784 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:27:22,784 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:27:22,784 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. because compaction request was cancelled 2024-11-21T00:27:22,784 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:22,785 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#B#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:22,785 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/86d911fb01c74aacbe8f214ccbae7a15 is 50, key is test_row_0/B:col10/1732148842497/Put/seqid=0 2024-11-21T00:27:22,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741918_1094 (size=13187) 2024-11-21T00:27:22,804 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/86d911fb01c74aacbe8f214ccbae7a15 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/86d911fb01c74aacbe8f214ccbae7a15 2024-11-21T00:27:22,814 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/B of 6103bc2a66018bd699c0a8ab668a67b7 into 86d911fb01c74aacbe8f214ccbae7a15(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:22,814 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:22,814 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/B, priority=13, startTime=1732148842724; duration=0sec 2024-11-21T00:27:22,815 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:22,815 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:22,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:22,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:27:22,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:22,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:22,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:22,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:22,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:22,898 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:22,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148902910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:22,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/87ad121c48cc4e849fde22d96c5ab57c is 50, key is test_row_0/A:col10/1732148842896/Put/seqid=0 2024-11-21T00:27:22,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148902913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:22,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741919_1095 (size=14741) 2024-11-21T00:27:23,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:23,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148903014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:23,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:23,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148903017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:23,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:23,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148903216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:23,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:23,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148903220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:23,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/87ad121c48cc4e849fde22d96c5ab57c 2024-11-21T00:27:23,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/a372b5ba8f0e49a59488b256baff31d9 is 50, key is test_row_0/B:col10/1732148842896/Put/seqid=0 2024-11-21T00:27:23,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741920_1096 (size=12301) 2024-11-21T00:27:23,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-21T00:27:23,355 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-21T00:27:23,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/a372b5ba8f0e49a59488b256baff31d9 2024-11-21T00:27:23,357 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:23,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-21T00:27:23,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-21T00:27:23,360 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:23,361 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:23,361 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:23,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/918b2585e6084b9391be2b84c6b9ab76 is 50, key is test_row_0/C:col10/1732148842896/Put/seqid=0 2024-11-21T00:27:23,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741921_1097 (size=12301) 2024-11-21T00:27:23,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-21T00:27:23,514 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:23,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-21T00:27:23,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:23,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:23,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:23,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:23,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:23,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:23,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:23,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148903520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:23,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:23,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148903523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:23,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-21T00:27:23,668 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:23,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-21T00:27:23,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:23,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:23,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:23,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:23,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:23,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:23,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/918b2585e6084b9391be2b84c6b9ab76 2024-11-21T00:27:23,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/87ad121c48cc4e849fde22d96c5ab57c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/87ad121c48cc4e849fde22d96c5ab57c 2024-11-21T00:27:23,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/87ad121c48cc4e849fde22d96c5ab57c, entries=200, sequenceid=389, filesize=14.4 K 2024-11-21T00:27:23,820 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:23,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-21T00:27:23,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:23,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:23,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:23,821 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:23,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:23,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:23,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/a372b5ba8f0e49a59488b256baff31d9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a372b5ba8f0e49a59488b256baff31d9 2024-11-21T00:27:23,831 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a372b5ba8f0e49a59488b256baff31d9, entries=150, sequenceid=389, filesize=12.0 K 2024-11-21T00:27:23,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/918b2585e6084b9391be2b84c6b9ab76 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/918b2585e6084b9391be2b84c6b9ab76 2024-11-21T00:27:23,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/918b2585e6084b9391be2b84c6b9ab76, entries=150, sequenceid=389, filesize=12.0 K 2024-11-21T00:27:23,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6103bc2a66018bd699c0a8ab668a67b7 in 951ms, sequenceid=389, compaction requested=true 2024-11-21T00:27:23,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:23,848 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-21T00:27:23,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:23,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:23,849 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 
2024-11-21T00:27:23,849 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:27:23,849 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:27:23,849 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. because compaction request was cancelled 2024-11-21T00:27:23,849 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:23,850 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:27:23,850 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:27:23,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:23,850 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. because compaction request was cancelled 2024-11-21T00:27:23,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:23,850 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:23,850 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:23,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:23,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:23,851 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:23,851 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/C is initiating minor compaction (all files) 2024-11-21T00:27:23,851 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/C in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:23,851 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c9b4babb847743af859b5adecff256ec, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/27c87bafdaf84cc69f876251094704e0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/918b2585e6084b9391be2b84c6b9ab76] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=36.8 K 2024-11-21T00:27:23,852 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9b4babb847743af859b5adecff256ec, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1732148841362 2024-11-21T00:27:23,853 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27c87bafdaf84cc69f876251094704e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732148842497 2024-11-21T00:27:23,853 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 918b2585e6084b9391be2b84c6b9ab76, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1732148842558 2024-11-21T00:27:23,864 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#C#compaction#83 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:23,864 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/a087b4c0ac334ad88e31970318a0affe is 50, key is test_row_0/C:col10/1732148842896/Put/seqid=0 2024-11-21T00:27:23,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741922_1098 (size=13221) 2024-11-21T00:27:23,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-21T00:27:23,973 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:23,973 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-21T00:27:23,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:23,974 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:27:23,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:23,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:23,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:23,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:23,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:23,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:23,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/0cd855aafb2742d582c8ca327e47e712 is 50, key is test_row_0/A:col10/1732148842908/Put/seqid=0 2024-11-21T00:27:24,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741923_1099 (size=12301) 2024-11-21T00:27:24,002 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/0cd855aafb2742d582c8ca327e47e712 2024-11-21T00:27:24,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/ccad69e203a34bd5bccb63b4995d5ade is 50, key is test_row_0/B:col10/1732148842908/Put/seqid=0 2024-11-21T00:27:24,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741924_1100 (size=12301) 2024-11-21T00:27:24,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:24,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
as already flushing 2024-11-21T00:27:24,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:24,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148904071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:24,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:24,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148904072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:24,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148904177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:24,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:24,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148904178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:24,283 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/a087b4c0ac334ad88e31970318a0affe as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/a087b4c0ac334ad88e31970318a0affe 2024-11-21T00:27:24,297 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/C of 6103bc2a66018bd699c0a8ab668a67b7 into a087b4c0ac334ad88e31970318a0affe(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:24,298 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:24,298 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/C, priority=13, startTime=1732148843850; duration=0sec 2024-11-21T00:27:24,298 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:24,298 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:24,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:24,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148904379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:24,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:24,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148904381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:24,422 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/ccad69e203a34bd5bccb63b4995d5ade 2024-11-21T00:27:24,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/1afe92935ac64434972bb849dd069d24 is 50, key is test_row_0/C:col10/1732148842908/Put/seqid=0 2024-11-21T00:27:24,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-21T00:27:24,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741925_1101 (size=12301) 2024-11-21T00:27:24,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:24,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148904682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:24,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:24,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148904685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:24,877 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/1afe92935ac64434972bb849dd069d24 2024-11-21T00:27:24,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/0cd855aafb2742d582c8ca327e47e712 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0cd855aafb2742d582c8ca327e47e712 2024-11-21T00:27:24,891 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0cd855aafb2742d582c8ca327e47e712, entries=150, sequenceid=400, filesize=12.0 K 2024-11-21T00:27:24,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/ccad69e203a34bd5bccb63b4995d5ade as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ccad69e203a34bd5bccb63b4995d5ade 2024-11-21T00:27:24,900 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ccad69e203a34bd5bccb63b4995d5ade, entries=150, sequenceid=400, filesize=12.0 K 2024-11-21T00:27:24,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/1afe92935ac64434972bb849dd069d24 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1afe92935ac64434972bb849dd069d24 2024-11-21T00:27:24,911 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1afe92935ac64434972bb849dd069d24, entries=150, sequenceid=400, filesize=12.0 K 2024-11-21T00:27:24,912 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6103bc2a66018bd699c0a8ab668a67b7 in 939ms, sequenceid=400, compaction requested=true 2024-11-21T00:27:24,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:24,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:24,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-21T00:27:24,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-21T00:27:24,916 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-21T00:27:24,916 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5530 sec 2024-11-21T00:27:24,918 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.5600 sec 2024-11-21T00:27:25,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:25,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-21T00:27:25,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:25,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:25,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:25,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:25,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:25,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:25,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/2cd2772d9e0248f9be99cc0ef2d11bb8 is 50, key is test_row_0/A:col10/1732148844071/Put/seqid=0 2024-11-21T00:27:25,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741926_1102 (size=12301) 2024-11-21T00:27:25,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:25,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148905204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:25,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148905205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:25,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:25,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148905306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148905307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-21T00:27:25,465 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-21T00:27:25,466 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:25,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-21T00:27:25,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-21T00:27:25,468 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:25,469 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:25,469 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:25,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:25,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148905510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148905510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-21T00:27:25,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/2cd2772d9e0248f9be99cc0ef2d11bb8 2024-11-21T00:27:25,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/daf8582ba66e4004848f21186a0e1887 is 50, key is test_row_0/B:col10/1732148844071/Put/seqid=0 2024-11-21T00:27:25,620 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-21T00:27:25,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:25,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:25,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:25,621 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:25,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:25,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:25,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741927_1103 (size=12301) 2024-11-21T00:27:25,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/daf8582ba66e4004848f21186a0e1887 2024-11-21T00:27:25,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/8cabd00cea8d474581c8588606f6a7c8 is 50, key is test_row_0/C:col10/1732148844071/Put/seqid=0 2024-11-21T00:27:25,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741928_1104 (size=12301) 2024-11-21T00:27:25,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-21T00:27:25,773 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-21T00:27:25,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:25,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:25,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:25,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:25,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:25,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:25,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:25,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45404 deadline: 1732148905781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,782 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18372 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:25,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148905812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148905812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,926 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:25,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-21T00:27:25,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:25,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:25,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:25,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:25,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:25,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:26,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/8cabd00cea8d474581c8588606f6a7c8 2024-11-21T00:27:26,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/2cd2772d9e0248f9be99cc0ef2d11bb8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/2cd2772d9e0248f9be99cc0ef2d11bb8 2024-11-21T00:27:26,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-21T00:27:26,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/2cd2772d9e0248f9be99cc0ef2d11bb8, entries=150, sequenceid=427, filesize=12.0 K 2024-11-21T00:27:26,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/daf8582ba66e4004848f21186a0e1887 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/daf8582ba66e4004848f21186a0e1887 2024-11-21T00:27:26,079 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:26,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-21T00:27:26,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:26,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. as already flushing 2024-11-21T00:27:26,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:26,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:26,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:26,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:26,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/daf8582ba66e4004848f21186a0e1887, entries=150, sequenceid=427, filesize=12.0 K 2024-11-21T00:27:26,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/8cabd00cea8d474581c8588606f6a7c8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/8cabd00cea8d474581c8588606f6a7c8 2024-11-21T00:27:26,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/8cabd00cea8d474581c8588606f6a7c8, entries=150, sequenceid=427, filesize=12.0 K 2024-11-21T00:27:26,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6103bc2a66018bd699c0a8ab668a67b7 in 915ms, sequenceid=427, compaction requested=true 2024-11-21T00:27:26,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:26,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:26,106 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 
blocking 2024-11-21T00:27:26,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:26,106 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:26,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:26,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:26,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:26,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:26,108 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52530 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:26,108 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/A is initiating minor compaction (all files) 2024-11-21T00:27:26,108 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/A in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:26,108 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c0b4bd910f134a2f8d886b2b7e4479e0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/87ad121c48cc4e849fde22d96c5ab57c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0cd855aafb2742d582c8ca327e47e712, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/2cd2772d9e0248f9be99cc0ef2d11bb8] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=51.3 K 2024-11-21T00:27:26,109 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0b4bd910f134a2f8d886b2b7e4479e0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732148842497 2024-11-21T00:27:26,109 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:26,109 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/B is initiating minor compaction (all files) 2024-11-21T00:27:26,109 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/B in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:26,109 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/86d911fb01c74aacbe8f214ccbae7a15, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a372b5ba8f0e49a59488b256baff31d9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ccad69e203a34bd5bccb63b4995d5ade, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/daf8582ba66e4004848f21186a0e1887] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=48.9 K 2024-11-21T00:27:26,109 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87ad121c48cc4e849fde22d96c5ab57c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1732148842558 2024-11-21T00:27:26,110 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0cd855aafb2742d582c8ca327e47e712, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1732148842905 2024-11-21T00:27:26,110 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cd2772d9e0248f9be99cc0ef2d11bb8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1732148844065 2024-11-21T00:27:26,111 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 86d911fb01c74aacbe8f214ccbae7a15, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732148842497 2024-11-21T00:27:26,111 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting a372b5ba8f0e49a59488b256baff31d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1732148842558 2024-11-21T00:27:26,112 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ccad69e203a34bd5bccb63b4995d5ade, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1732148842905 2024-11-21T00:27:26,112 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting daf8582ba66e4004848f21186a0e1887, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1732148844065 2024-11-21T00:27:26,122 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#A#compaction#90 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:26,124 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/b46653aba96242d4a3bf5cde028b8b72 is 50, key is test_row_0/A:col10/1732148844071/Put/seqid=0 2024-11-21T00:27:26,126 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#B#compaction#91 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:26,127 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/3b5ce974a6594eac8ca4a082ed2a41de is 50, key is test_row_0/B:col10/1732148844071/Put/seqid=0 2024-11-21T00:27:26,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741929_1105 (size=13323) 2024-11-21T00:27:26,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741930_1106 (size=13323) 2024-11-21T00:27:26,233 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:26,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-21T00:27:26,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:26,233 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-21T00:27:26,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:26,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:26,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:26,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:26,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:26,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:26,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/9c748b71c5ca46aeae112da2e9693ae6 is 50, key is test_row_0/A:col10/1732148845203/Put/seqid=0 2024-11-21T00:27:26,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741931_1107 (size=9857) 2024-11-21T00:27:26,245 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/9c748b71c5ca46aeae112da2e9693ae6 2024-11-21T00:27:26,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/b76fbf0976ad4823a3dfc9f8050fa9d9 is 50, key is test_row_0/B:col10/1732148845203/Put/seqid=0 2024-11-21T00:27:26,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741932_1108 (size=9857) 2024-11-21T00:27:26,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:26,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
as already flushing 2024-11-21T00:27:26,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:26,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 295 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148906367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:26,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:26,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148906369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:26,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:26,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148906471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:26,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:26,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148906473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:26,547 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/b46653aba96242d4a3bf5cde028b8b72 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b46653aba96242d4a3bf5cde028b8b72 2024-11-21T00:27:26,555 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/3b5ce974a6594eac8ca4a082ed2a41de as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3b5ce974a6594eac8ca4a082ed2a41de 2024-11-21T00:27:26,557 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/A of 6103bc2a66018bd699c0a8ab668a67b7 into b46653aba96242d4a3bf5cde028b8b72(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:26,557 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:26,557 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/A, priority=12, startTime=1732148846106; duration=0sec 2024-11-21T00:27:26,557 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:26,557 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:26,557 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:26,558 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:26,559 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/C is initiating minor compaction (all files) 2024-11-21T00:27:26,559 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/C in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:26,559 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/a087b4c0ac334ad88e31970318a0affe, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1afe92935ac64434972bb849dd069d24, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/8cabd00cea8d474581c8588606f6a7c8] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=36.9 K 2024-11-21T00:27:26,560 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting a087b4c0ac334ad88e31970318a0affe, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1732148842558 2024-11-21T00:27:26,561 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1afe92935ac64434972bb849dd069d24, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1732148842905 2024-11-21T00:27:26,562 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8cabd00cea8d474581c8588606f6a7c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1732148844065 2024-11-21T00:27:26,562 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 4 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/B of 6103bc2a66018bd699c0a8ab668a67b7 into 3b5ce974a6594eac8ca4a082ed2a41de(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:26,562 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:26,562 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/B, priority=12, startTime=1732148846106; duration=0sec 2024-11-21T00:27:26,563 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:26,563 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:26,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-21T00:27:26,573 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#C#compaction#94 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:26,573 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/1bfbe9df66c943ed910cf944bba11337 is 50, key is test_row_0/C:col10/1732148844071/Put/seqid=0 2024-11-21T00:27:26,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741933_1109 (size=13323) 2024-11-21T00:27:26,597 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/1bfbe9df66c943ed910cf944bba11337 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1bfbe9df66c943ed910cf944bba11337 2024-11-21T00:27:26,608 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/C of 6103bc2a66018bd699c0a8ab668a67b7 into 1bfbe9df66c943ed910cf944bba11337(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:26,608 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:26,608 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/C, priority=13, startTime=1732148846106; duration=0sec 2024-11-21T00:27:26,608 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:26,609 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:26,673 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/b76fbf0976ad4823a3dfc9f8050fa9d9 2024-11-21T00:27:26,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:26,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45376 deadline: 1732148906675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:26,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:26,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732148906677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:26,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/2d3aa257801a4b3c90f0b6af5170eef6 is 50, key is test_row_0/C:col10/1732148845203/Put/seqid=0 2024-11-21T00:27:26,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741934_1110 (size=9857) 2024-11-21T00:27:26,696 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/2d3aa257801a4b3c90f0b6af5170eef6 2024-11-21T00:27:26,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/9c748b71c5ca46aeae112da2e9693ae6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9c748b71c5ca46aeae112da2e9693ae6 2024-11-21T00:27:26,714 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9c748b71c5ca46aeae112da2e9693ae6, entries=100, sequenceid=437, filesize=9.6 K 2024-11-21T00:27:26,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/b76fbf0976ad4823a3dfc9f8050fa9d9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/b76fbf0976ad4823a3dfc9f8050fa9d9 2024-11-21T00:27:26,731 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/b76fbf0976ad4823a3dfc9f8050fa9d9, entries=100, sequenceid=437, filesize=9.6 K 2024-11-21T00:27:26,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/2d3aa257801a4b3c90f0b6af5170eef6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/2d3aa257801a4b3c90f0b6af5170eef6 2024-11-21T00:27:26,755 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/2d3aa257801a4b3c90f0b6af5170eef6, entries=100, sequenceid=437, filesize=9.6 K 2024-11-21T00:27:26,757 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 6103bc2a66018bd699c0a8ab668a67b7 in 523ms, sequenceid=437, compaction requested=false 2024-11-21T00:27:26,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:26,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:26,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-21T00:27:26,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-21T00:27:26,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-21T00:27:26,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2900 sec 2024-11-21T00:27:26,763 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.2960 sec 2024-11-21T00:27:26,836 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x39028e20 to 127.0.0.1:64241 2024-11-21T00:27:26,836 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:26,836 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22daddc4 to 127.0.0.1:64241 2024-11-21T00:27:26,837 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4e957ecd to 127.0.0.1:64241 2024-11-21T00:27:26,837 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:26,837 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:26,837 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x624dc5e5 to 127.0.0.1:64241 2024-11-21T00:27:26,837 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:26,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:26,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-21T00:27:26,982 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x536a4a58 to 127.0.0.1:64241 2024-11-21T00:27:26,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:26,982 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:26,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:26,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:26,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:26,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:26,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:26,985 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x435176b2 to 127.0.0.1:64241 2024-11-21T00:27:26,985 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:26,987 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/1a4639f4b304448ba02804cadbe63e49 is 50, key is test_row_0/A:col10/1732148846366/Put/seqid=0 2024-11-21T00:27:26,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741935_1111 (size=12301) 2024-11-21T00:27:27,297 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:27:27,393 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/1a4639f4b304448ba02804cadbe63e49 2024-11-21T00:27:27,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/794f4c40cc184b7690c9159dc02facbb is 50, key is test_row_0/B:col10/1732148846366/Put/seqid=0 2024-11-21T00:27:27,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741936_1112 (size=12301) 2024-11-21T00:27:27,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-21T00:27:27,574 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-21T00:27:27,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/794f4c40cc184b7690c9159dc02facbb 2024-11-21T00:27:27,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/5e69fcddff79427a92b938295c198384 is 50, key is test_row_0/C:col10/1732148846366/Put/seqid=0 2024-11-21T00:27:27,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741937_1113 (size=12301) 2024-11-21T00:27:28,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/5e69fcddff79427a92b938295c198384 2024-11-21T00:27:28,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/1a4639f4b304448ba02804cadbe63e49 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1a4639f4b304448ba02804cadbe63e49 2024-11-21T00:27:28,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1a4639f4b304448ba02804cadbe63e49, entries=150, sequenceid=467, filesize=12.0 K 2024-11-21T00:27:28,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/794f4c40cc184b7690c9159dc02facbb as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/794f4c40cc184b7690c9159dc02facbb 2024-11-21T00:27:28,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/794f4c40cc184b7690c9159dc02facbb, entries=150, sequenceid=467, filesize=12.0 K 2024-11-21T00:27:28,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/5e69fcddff79427a92b938295c198384 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/5e69fcddff79427a92b938295c198384 2024-11-21T00:27:28,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/5e69fcddff79427a92b938295c198384, entries=150, sequenceid=467, filesize=12.0 K 2024-11-21T00:27:28,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=6.71 KB/6870 for 6103bc2a66018bd699c0a8ab668a67b7 in 1274ms, sequenceid=467, compaction requested=true 2024-11-21T00:27:28,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:28,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:28,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:28,256 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:28,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:28,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:28,256 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:28,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6103bc2a66018bd699c0a8ab668a67b7:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:28,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:28,257 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:28,257 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:28,257 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/A is initiating minor compaction (all files) 2024-11-21T00:27:28,257 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/B is initiating minor compaction (all files) 2024-11-21T00:27:28,257 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/B in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:28,257 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/A in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:28,258 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3b5ce974a6594eac8ca4a082ed2a41de, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/b76fbf0976ad4823a3dfc9f8050fa9d9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/794f4c40cc184b7690c9159dc02facbb] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=34.6 K 2024-11-21T00:27:28,258 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b46653aba96242d4a3bf5cde028b8b72, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9c748b71c5ca46aeae112da2e9693ae6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1a4639f4b304448ba02804cadbe63e49] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=34.6 K 2024-11-21T00:27:28,258 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b5ce974a6594eac8ca4a082ed2a41de, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1732148844065 2024-11-21T00:27:28,258 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting b46653aba96242d4a3bf5cde028b8b72, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1732148844065 2024-11-21T00:27:28,258 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b76fbf0976ad4823a3dfc9f8050fa9d9, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732148845203 2024-11-21T00:27:28,258 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c748b71c5ca46aeae112da2e9693ae6, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732148845203 2024-11-21T00:27:28,259 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a4639f4b304448ba02804cadbe63e49, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732148846365 2024-11-21T00:27:28,259 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 794f4c40cc184b7690c9159dc02facbb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732148846365 2024-11-21T00:27:28,266 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#B#compaction#99 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:28,266 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#A#compaction#100 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:28,266 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/1a27ade47a314bd2ad041ff367fed18e is 50, key is test_row_0/A:col10/1732148846366/Put/seqid=0 2024-11-21T00:27:28,267 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/8579eb26937a48dba25e4a2467bd23ba is 50, key is test_row_0/B:col10/1732148846366/Put/seqid=0 2024-11-21T00:27:28,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741938_1114 (size=13425) 2024-11-21T00:27:28,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741939_1115 (size=13425) 2024-11-21T00:27:28,676 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/1a27ade47a314bd2ad041ff367fed18e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1a27ade47a314bd2ad041ff367fed18e 2024-11-21T00:27:28,677 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/8579eb26937a48dba25e4a2467bd23ba as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/8579eb26937a48dba25e4a2467bd23ba 2024-11-21T00:27:28,682 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/B of 6103bc2a66018bd699c0a8ab668a67b7 into 8579eb26937a48dba25e4a2467bd23ba(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:28,682 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/A of 6103bc2a66018bd699c0a8ab668a67b7 into 1a27ade47a314bd2ad041ff367fed18e(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:28,682 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:28,682 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:28,682 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/B, priority=13, startTime=1732148848256; duration=0sec 2024-11-21T00:27:28,682 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/A, priority=13, startTime=1732148848256; duration=0sec 2024-11-21T00:27:28,682 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:28,682 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:B 2024-11-21T00:27:28,682 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:28,683 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:28,683 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:A 2024-11-21T00:27:28,684 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:28,684 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 6103bc2a66018bd699c0a8ab668a67b7/C is initiating minor compaction (all files) 2024-11-21T00:27:28,684 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6103bc2a66018bd699c0a8ab668a67b7/C in TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:28,684 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1bfbe9df66c943ed910cf944bba11337, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/2d3aa257801a4b3c90f0b6af5170eef6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/5e69fcddff79427a92b938295c198384] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp, totalSize=34.6 K 2024-11-21T00:27:28,685 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bfbe9df66c943ed910cf944bba11337, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1732148844065 2024-11-21T00:27:28,685 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d3aa257801a4b3c90f0b6af5170eef6, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732148845203 2024-11-21T00:27:28,685 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e69fcddff79427a92b938295c198384, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732148846365 2024-11-21T00:27:28,693 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6103bc2a66018bd699c0a8ab668a67b7#C#compaction#101 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:28,694 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/ab16578967c84e339cf0453be59ca4de is 50, key is test_row_0/C:col10/1732148846366/Put/seqid=0 2024-11-21T00:27:28,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741940_1116 (size=13425) 2024-11-21T00:27:29,110 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/ab16578967c84e339cf0453be59ca4de as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ab16578967c84e339cf0453be59ca4de 2024-11-21T00:27:29,118 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6103bc2a66018bd699c0a8ab668a67b7/C of 6103bc2a66018bd699c0a8ab668a67b7 into ab16578967c84e339cf0453be59ca4de(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
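Editor's note: the "Committing .../.tmp/C/&lt;file&gt; as .../C/&lt;file&gt;" lines show the write-then-rename pattern used for both compaction and flush output: the new HFile is written under the region's `.tmp` directory and only moved into the column-family directory once complete, so readers never see a partially written file. The snippet below is a local-filesystem sketch of that commit step (HBase performs the equivalent via HRegionFileSystem on HDFS; the helper name is invented).

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Local-filesystem analogue of committing a finished store file out of .tmp.
public class CommitTmpFileSketch {

    /** Moves a finished file from <region>/.tmp/<family>/ into <region>/<family>/. */
    static Path commit(Path regionDir, String family, String fileName) throws IOException {
        Path tmpFile = regionDir.resolve(".tmp").resolve(family).resolve(fileName);
        Path familyDir = regionDir.resolve(family);
        Files.createDirectories(familyDir);
        try {
            // Make the new file visible in a single step where the filesystem supports it.
            return Files.move(tmpFile, familyDir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
        } catch (java.nio.file.AtomicMoveNotSupportedException e) {
            return Files.move(tmpFile, familyDir.resolve(fileName));
        }
    }

    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("region");
        Path tmp = regionDir.resolve(".tmp").resolve("C");
        Files.createDirectories(tmp);
        Files.writeString(tmp.resolve("example-hfile"), "compacted hfile contents");
        System.out.println("committed to " + commit(regionDir, "C", "example-hfile"));
    }
}
```

Once the commit succeeds, the store reports the compaction complete and the replaced input files become eligible for archiving, which is what the later StoreCloser lines carry out.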
2024-11-21T00:27:29,118 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:29,118 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7., storeName=6103bc2a66018bd699c0a8ab668a67b7/C, priority=13, startTime=1732148848256; duration=0sec 2024-11-21T00:27:29,118 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:29,118 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6103bc2a66018bd699c0a8ab668a67b7:C 2024-11-21T00:27:31,063 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2dd0bbda to 127.0.0.1:64241 2024-11-21T00:27:31,063 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:31,090 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18f2a76d to 127.0.0.1:64241 2024-11-21T00:27:31,090 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:34,298 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:27:34,301 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:27:35,839 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3888ad7c to 127.0.0.1:64241 2024-11-21T00:27:35,839 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 161 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 7 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 22 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 23 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 158 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3625 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3674 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1610 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4827 rows 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1612 2024-11-21T00:27:35,840 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4834 rows 2024-11-21T00:27:35,840 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-21T00:27:35,840 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x30640414 to 127.0.0.1:64241 2024-11-21T00:27:35,840 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:35,844 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-21T00:27:35,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-21T00:27:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-21T00:27:35,865 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148855865"}]},"ts":"1732148855865"} 2024-11-21T00:27:35,870 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-21T00:27:35,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-21T00:27:35,883 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-21T00:27:35,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-21T00:27:35,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6103bc2a66018bd699c0a8ab668a67b7, UNASSIGN}] 2024-11-21T00:27:35,897 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=6103bc2a66018bd699c0a8ab668a67b7, UNASSIGN 2024-11-21T00:27:35,898 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=6103bc2a66018bd699c0a8ab668a67b7, regionState=CLOSING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:35,901 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T00:27:35,902 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; CloseRegionProcedure 6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:27:35,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-21T00:27:36,060 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:36,063 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(124): Close 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:36,063 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-21T00:27:36,064 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1681): Closing 6103bc2a66018bd699c0a8ab668a67b7, disabling compactions & flushes 2024-11-21T00:27:36,064 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:36,064 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 2024-11-21T00:27:36,064 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. after waiting 0 ms 2024-11-21T00:27:36,064 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
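Editor's note: after the CloseRegionProcedure dispatched above runs its final flush, the StoreCloser lines that follow move every already-compacted store file out of the live data directory into the cluster archive, preserving the table/region/family layout (data/default/TestAcidGuarantees/&lt;region&gt;/&lt;family&gt;/&lt;file&gt; becomes archive/data/default/TestAcidGuarantees/&lt;region&gt;/&lt;family&gt;/&lt;file&gt;). The sketch below shows only that path mapping; the actual work is done by backup.HFileArchiver, and the method name here is made up.

```java
// Sketch of the data-dir -> archive-dir mapping visible in the HFileArchiver lines below.
public class ArchivePathSketch {

    /**
     * Maps <root>/data/<namespace>/<table>/<region>/<family>/<file>
     * to   <root>/archive/data/<namespace>/<table>/<region>/<family>/<file>.
     */
    static String toArchivePath(String rootDir, String storeFilePath) {
        String prefix = rootDir.endsWith("/") ? rootDir : rootDir + "/";
        if (!storeFilePath.startsWith(prefix + "data/")) {
            throw new IllegalArgumentException("not under " + prefix + "data/: " + storeFilePath);
        }
        String relative = storeFilePath.substring(prefix.length());   // "data/default/..."
        return prefix + "archive/" + relative;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f";
        String src = root + "/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7"
            + "/A/29b9d65a1e614797bbecf0b7586a81cf";
        // Prints the same archive location the log reports for this store file.
        System.out.println(toArchivePath(root, src));
    }
}
```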
2024-11-21T00:27:36,064 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(2837): Flushing 6103bc2a66018bd699c0a8ab668a67b7 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-21T00:27:36,064 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=A 2024-11-21T00:27:36,064 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:36,065 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=B 2024-11-21T00:27:36,065 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:36,065 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6103bc2a66018bd699c0a8ab668a67b7, store=C 2024-11-21T00:27:36,065 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:36,071 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/a220035917c54191b1830b7b698d678d is 50, key is test_row_0/A:col10/1732148855837/Put/seqid=0 2024-11-21T00:27:36,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741941_1117 (size=12301) 2024-11-21T00:27:36,077 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/a220035917c54191b1830b7b698d678d 2024-11-21T00:27:36,095 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/7d52724f10c345ccac54d918f6ec84d9 is 50, key is test_row_0/B:col10/1732148855837/Put/seqid=0 2024-11-21T00:27:36,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741942_1118 (size=12301) 2024-11-21T00:27:36,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-21T00:27:36,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-21T00:27:36,505 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 
{event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/7d52724f10c345ccac54d918f6ec84d9 2024-11-21T00:27:36,514 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0f557e25410c4722becc009e36b9eac3 is 50, key is test_row_0/C:col10/1732148855837/Put/seqid=0 2024-11-21T00:27:36,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741943_1119 (size=12301) 2024-11-21T00:27:36,923 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0f557e25410c4722becc009e36b9eac3 2024-11-21T00:27:36,928 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/A/a220035917c54191b1830b7b698d678d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/a220035917c54191b1830b7b698d678d 2024-11-21T00:27:36,934 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/a220035917c54191b1830b7b698d678d, entries=150, sequenceid=477, filesize=12.0 K 2024-11-21T00:27:36,936 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/B/7d52724f10c345ccac54d918f6ec84d9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/7d52724f10c345ccac54d918f6ec84d9 2024-11-21T00:27:36,941 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/7d52724f10c345ccac54d918f6ec84d9, entries=150, sequenceid=477, filesize=12.0 K 2024-11-21T00:27:36,943 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/.tmp/C/0f557e25410c4722becc009e36b9eac3 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0f557e25410c4722becc009e36b9eac3 2024-11-21T00:27:36,950 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0f557e25410c4722becc009e36b9eac3, entries=150, sequenceid=477, filesize=12.0 K 2024-11-21T00:27:36,951 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 6103bc2a66018bd699c0a8ab668a67b7 in 887ms, sequenceid=477, compaction requested=false 2024-11-21T00:27:36,952 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/29b9d65a1e614797bbecf0b7586a81cf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0a4fd3cf85154978b31f3dab9988cfde, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/586d1d8ac3454c5da0ac92ed02be9a91, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/5d4cbf95fa2b4877bdaf260f62a64353, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4ecc2bff61354e779cb75c305a2c2bec, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/fd2213efdd3f434283281c336a25b93e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/75e2f1f2fd6d49809045ba0a14cd9d92, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c559fafb727d402388f1f9da50e92006, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1849cfef9e9542b888cfc0d1f5b434ca, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/596e47d4435f4c94b3215c98ec1adfeb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e957432f6b97469a963d5033235f5cb0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b5118c8b4d2144e3b18051cea0651d08, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b24d16631e69417db90d38b0d57e7242, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/ff0734378c4242f0a5c19979e612bc6e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/20e360dbcb93470face4097212a052ef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4070b4542edb4476a77020b8c4ef83bf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/edceee44a132450eaacf544fb9ec348c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9d32edb1635d4b98a831921507203342, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/cd716e7b8e9e44f9ae6a3b875b4915d0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/3ac41f1033c948aca3297c3de343fd10, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/66f3d72a26c642b78ff74d8238e14a63, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/f28cdcb921c24c009cba0c7166cbd215, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/190b19b5b613400488457bd6ca7fb558, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e8b3e0a752da42348164f89f8a913c42, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/8744a631ccb24d209b755bb98ce11341, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/7b8e05d606ab432ebdd3f34120cd1eed, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c0b4bd910f134a2f8d886b2b7e4479e0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/87ad121c48cc4e849fde22d96c5ab57c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0cd855aafb2742d582c8ca327e47e712, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b46653aba96242d4a3bf5cde028b8b72, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/2cd2772d9e0248f9be99cc0ef2d11bb8, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9c748b71c5ca46aeae112da2e9693ae6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1a4639f4b304448ba02804cadbe63e49] to archive 2024-11-21T00:27:36,956 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:27:36,964 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/29b9d65a1e614797bbecf0b7586a81cf to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/29b9d65a1e614797bbecf0b7586a81cf 2024-11-21T00:27:36,966 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0a4fd3cf85154978b31f3dab9988cfde to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0a4fd3cf85154978b31f3dab9988cfde 2024-11-21T00:27:36,969 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/586d1d8ac3454c5da0ac92ed02be9a91 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/586d1d8ac3454c5da0ac92ed02be9a91 2024-11-21T00:27:36,976 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/5d4cbf95fa2b4877bdaf260f62a64353 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/5d4cbf95fa2b4877bdaf260f62a64353 2024-11-21T00:27:36,978 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4ecc2bff61354e779cb75c305a2c2bec to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4ecc2bff61354e779cb75c305a2c2bec 2024-11-21T00:27:36,980 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/fd2213efdd3f434283281c336a25b93e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/fd2213efdd3f434283281c336a25b93e 2024-11-21T00:27:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-21T00:27:36,982 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/75e2f1f2fd6d49809045ba0a14cd9d92 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/75e2f1f2fd6d49809045ba0a14cd9d92 2024-11-21T00:27:36,983 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c559fafb727d402388f1f9da50e92006 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c559fafb727d402388f1f9da50e92006 2024-11-21T00:27:36,985 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1849cfef9e9542b888cfc0d1f5b434ca to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1849cfef9e9542b888cfc0d1f5b434ca 2024-11-21T00:27:36,986 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/596e47d4435f4c94b3215c98ec1adfeb to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/596e47d4435f4c94b3215c98ec1adfeb 2024-11-21T00:27:36,987 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e957432f6b97469a963d5033235f5cb0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e957432f6b97469a963d5033235f5cb0 2024-11-21T00:27:36,989 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b5118c8b4d2144e3b18051cea0651d08 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b5118c8b4d2144e3b18051cea0651d08 2024-11-21T00:27:36,992 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b24d16631e69417db90d38b0d57e7242 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b24d16631e69417db90d38b0d57e7242 2024-11-21T00:27:36,998 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/ff0734378c4242f0a5c19979e612bc6e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/ff0734378c4242f0a5c19979e612bc6e 2024-11-21T00:27:37,002 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/20e360dbcb93470face4097212a052ef to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/20e360dbcb93470face4097212a052ef 2024-11-21T00:27:37,005 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4070b4542edb4476a77020b8c4ef83bf to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/4070b4542edb4476a77020b8c4ef83bf 2024-11-21T00:27:37,007 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/edceee44a132450eaacf544fb9ec348c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/edceee44a132450eaacf544fb9ec348c 2024-11-21T00:27:37,011 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9d32edb1635d4b98a831921507203342 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9d32edb1635d4b98a831921507203342 2024-11-21T00:27:37,018 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/cd716e7b8e9e44f9ae6a3b875b4915d0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/cd716e7b8e9e44f9ae6a3b875b4915d0 2024-11-21T00:27:37,021 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/3ac41f1033c948aca3297c3de343fd10 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/3ac41f1033c948aca3297c3de343fd10 2024-11-21T00:27:37,023 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/66f3d72a26c642b78ff74d8238e14a63 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/66f3d72a26c642b78ff74d8238e14a63 2024-11-21T00:27:37,025 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/f28cdcb921c24c009cba0c7166cbd215 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/f28cdcb921c24c009cba0c7166cbd215 2024-11-21T00:27:37,026 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/190b19b5b613400488457bd6ca7fb558 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/190b19b5b613400488457bd6ca7fb558 2024-11-21T00:27:37,028 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e8b3e0a752da42348164f89f8a913c42 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/e8b3e0a752da42348164f89f8a913c42 2024-11-21T00:27:37,030 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/8744a631ccb24d209b755bb98ce11341 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/8744a631ccb24d209b755bb98ce11341 2024-11-21T00:27:37,032 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/7b8e05d606ab432ebdd3f34120cd1eed to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/7b8e05d606ab432ebdd3f34120cd1eed 2024-11-21T00:27:37,034 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c0b4bd910f134a2f8d886b2b7e4479e0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/c0b4bd910f134a2f8d886b2b7e4479e0 2024-11-21T00:27:37,036 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/87ad121c48cc4e849fde22d96c5ab57c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/87ad121c48cc4e849fde22d96c5ab57c 2024-11-21T00:27:37,047 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0cd855aafb2742d582c8ca327e47e712 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/0cd855aafb2742d582c8ca327e47e712 2024-11-21T00:27:37,049 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b46653aba96242d4a3bf5cde028b8b72 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/b46653aba96242d4a3bf5cde028b8b72 2024-11-21T00:27:37,051 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/2cd2772d9e0248f9be99cc0ef2d11bb8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/2cd2772d9e0248f9be99cc0ef2d11bb8 2024-11-21T00:27:37,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9c748b71c5ca46aeae112da2e9693ae6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/9c748b71c5ca46aeae112da2e9693ae6 2024-11-21T00:27:37,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1a4639f4b304448ba02804cadbe63e49 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1a4639f4b304448ba02804cadbe63e49 2024-11-21T00:27:37,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a293ae71a18b4445a684fce1d3bd24aa, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63a71c57fe4a42909caa1bf0e1f49ba8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c42f11b7b7134e979f91047056422401, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/0fc7298451a74887b9799cf230d354db, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/33144f25f2334fc5a907cdd8e0ae516a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ebd0ec4f809c489fb6e8e7de88f674ab, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/f77a1265657141d28b084ff7f67f27c0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5d483b9b53914776aa41d55ad79bec9d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/6cfeeedb15d04364a5facf93e9350793, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63c3f2deff3b45ecae792f19d0126226, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5138891cb78f4d9bac21cc3a015f8962, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/81d70bb99c15415ab8009a8ac26f0cde, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/fac2656ceb0d4ffdbab6ddf2a283a702, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/23b3d87afe1a4807a0211d77427de2df, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/e8d06f12296a4fb88a8c3a9421c9ec68, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/655b508040bd499c8d5948c475b6637f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/310f918b94d6443d96f3977b24a20a7b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/efc222aa709144c1a5ac24403ee6286b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/519d08a92c924353adb66931e624a86d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/120a9d91dec847d1941750cf59693eb4, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5834dbeb22134a75ae55a469a33b8d71, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/8b06d24e59094e6cb175a4733fbdaf27, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/bcadbe01f27c4115a278e325de65a75b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3d2e2ae8216c4f469eca7362e6631bce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5cecd01999d54f1084a628645905abd2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/86d911fb01c74aacbe8f214ccbae7a15, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c276fb07b6ea4e24beef7d94bcf65a0d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a372b5ba8f0e49a59488b256baff31d9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ccad69e203a34bd5bccb63b4995d5ade, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3b5ce974a6594eac8ca4a082ed2a41de, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/daf8582ba66e4004848f21186a0e1887, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/b76fbf0976ad4823a3dfc9f8050fa9d9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/794f4c40cc184b7690c9159dc02facbb] to archive 2024-11-21T00:27:37,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:27:37,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a293ae71a18b4445a684fce1d3bd24aa to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a293ae71a18b4445a684fce1d3bd24aa 2024-11-21T00:27:37,084 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63a71c57fe4a42909caa1bf0e1f49ba8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63a71c57fe4a42909caa1bf0e1f49ba8 2024-11-21T00:27:37,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c42f11b7b7134e979f91047056422401 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c42f11b7b7134e979f91047056422401 2024-11-21T00:27:37,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/0fc7298451a74887b9799cf230d354db to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/0fc7298451a74887b9799cf230d354db 2024-11-21T00:27:37,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/33144f25f2334fc5a907cdd8e0ae516a to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/33144f25f2334fc5a907cdd8e0ae516a 2024-11-21T00:27:37,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ebd0ec4f809c489fb6e8e7de88f674ab to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ebd0ec4f809c489fb6e8e7de88f674ab 2024-11-21T00:27:37,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/f77a1265657141d28b084ff7f67f27c0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/f77a1265657141d28b084ff7f67f27c0 2024-11-21T00:27:37,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5d483b9b53914776aa41d55ad79bec9d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5d483b9b53914776aa41d55ad79bec9d 2024-11-21T00:27:37,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/6cfeeedb15d04364a5facf93e9350793 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/6cfeeedb15d04364a5facf93e9350793 2024-11-21T00:27:37,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63c3f2deff3b45ecae792f19d0126226 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/63c3f2deff3b45ecae792f19d0126226 2024-11-21T00:27:37,123 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5138891cb78f4d9bac21cc3a015f8962 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5138891cb78f4d9bac21cc3a015f8962 2024-11-21T00:27:37,125 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/81d70bb99c15415ab8009a8ac26f0cde to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/81d70bb99c15415ab8009a8ac26f0cde 2024-11-21T00:27:37,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/fac2656ceb0d4ffdbab6ddf2a283a702 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/fac2656ceb0d4ffdbab6ddf2a283a702 2024-11-21T00:27:37,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/23b3d87afe1a4807a0211d77427de2df to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/23b3d87afe1a4807a0211d77427de2df 2024-11-21T00:27:37,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/e8d06f12296a4fb88a8c3a9421c9ec68 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/e8d06f12296a4fb88a8c3a9421c9ec68 2024-11-21T00:27:37,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/655b508040bd499c8d5948c475b6637f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/655b508040bd499c8d5948c475b6637f 2024-11-21T00:27:37,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/310f918b94d6443d96f3977b24a20a7b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/310f918b94d6443d96f3977b24a20a7b 2024-11-21T00:27:37,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/efc222aa709144c1a5ac24403ee6286b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/efc222aa709144c1a5ac24403ee6286b 2024-11-21T00:27:37,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/519d08a92c924353adb66931e624a86d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/519d08a92c924353adb66931e624a86d 2024-11-21T00:27:37,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/120a9d91dec847d1941750cf59693eb4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/120a9d91dec847d1941750cf59693eb4 2024-11-21T00:27:37,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5834dbeb22134a75ae55a469a33b8d71 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5834dbeb22134a75ae55a469a33b8d71 2024-11-21T00:27:37,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/8b06d24e59094e6cb175a4733fbdaf27 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/8b06d24e59094e6cb175a4733fbdaf27 2024-11-21T00:27:37,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/bcadbe01f27c4115a278e325de65a75b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/bcadbe01f27c4115a278e325de65a75b 2024-11-21T00:27:37,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3d2e2ae8216c4f469eca7362e6631bce to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3d2e2ae8216c4f469eca7362e6631bce 2024-11-21T00:27:37,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5cecd01999d54f1084a628645905abd2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/5cecd01999d54f1084a628645905abd2 2024-11-21T00:27:37,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/86d911fb01c74aacbe8f214ccbae7a15 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/86d911fb01c74aacbe8f214ccbae7a15 2024-11-21T00:27:37,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c276fb07b6ea4e24beef7d94bcf65a0d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/c276fb07b6ea4e24beef7d94bcf65a0d 2024-11-21T00:27:37,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a372b5ba8f0e49a59488b256baff31d9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/a372b5ba8f0e49a59488b256baff31d9 2024-11-21T00:27:37,177 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ccad69e203a34bd5bccb63b4995d5ade to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/ccad69e203a34bd5bccb63b4995d5ade 2024-11-21T00:27:37,180 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3b5ce974a6594eac8ca4a082ed2a41de to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/3b5ce974a6594eac8ca4a082ed2a41de 2024-11-21T00:27:37,186 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/daf8582ba66e4004848f21186a0e1887 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/daf8582ba66e4004848f21186a0e1887 2024-11-21T00:27:37,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/b76fbf0976ad4823a3dfc9f8050fa9d9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/b76fbf0976ad4823a3dfc9f8050fa9d9 2024-11-21T00:27:37,192 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/794f4c40cc184b7690c9159dc02facbb to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/794f4c40cc184b7690c9159dc02facbb 2024-11-21T00:27:37,203 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c75edd7dbc3e47ce9b5f9c9857617e59, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3bf800e133ca423ebbf0e5be8e73016d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/4ae801f8a06d4e299af3a1671656df68, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0837b515c0e64fa291a4b0b47d37d646, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6bea144f2f344503b337a8e577fc56cc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f330e8b6f035409aa4a3484cc2903bf1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b33ff699c02449809ff276a8f42c9c34, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ccd790e1199b484aac8222340212e615, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/542a308a694440f2aa2648e664e44372, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3fa7683d74db4663b040a5ebe0d94837, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/984fd39907744b2d8822d60d1b94c43e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b785fc8496d54b7dbeec8d7ac70c8156, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/4284908554ef43e890f662a1ea09e969, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/9340bd109ebc4623996114de60b4bd4e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f09bc1262b5b4c13b8a562d2feedbb33, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e77664447a68400eb3083869101841c3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/15e5c95b4b234ac2aaab33c1f298b575, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ba30f2a9c0754f37b02a09717eddce04, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0ef34c46c1d84f829ba7a657545989d2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ab0a61515da64051a3f9c11a55b05ee8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0c1ee5ac46b045ffa11873ba8f946ebf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e08678c5f03044fb863187bee77b0f09, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/7407f46aac5943249105e328e3cf4022, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c9b4babb847743af859b5adecff256ec, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6b3621bfade34708ba52a0db2a21da78, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/27c87bafdaf84cc69f876251094704e0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/a087b4c0ac334ad88e31970318a0affe, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/918b2585e6084b9391be2b84c6b9ab76, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1afe92935ac64434972bb849dd069d24, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1bfbe9df66c943ed910cf944bba11337, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/8cabd00cea8d474581c8588606f6a7c8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/2d3aa257801a4b3c90f0b6af5170eef6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/5e69fcddff79427a92b938295c198384] to archive 2024-11-21T00:27:37,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:27:37,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c75edd7dbc3e47ce9b5f9c9857617e59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c75edd7dbc3e47ce9b5f9c9857617e59 2024-11-21T00:27:37,208 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3bf800e133ca423ebbf0e5be8e73016d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3bf800e133ca423ebbf0e5be8e73016d 2024-11-21T00:27:37,210 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/4ae801f8a06d4e299af3a1671656df68 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/4ae801f8a06d4e299af3a1671656df68 2024-11-21T00:27:37,211 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0837b515c0e64fa291a4b0b47d37d646 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0837b515c0e64fa291a4b0b47d37d646 2024-11-21T00:27:37,215 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6bea144f2f344503b337a8e577fc56cc to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6bea144f2f344503b337a8e577fc56cc 2024-11-21T00:27:37,216 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f330e8b6f035409aa4a3484cc2903bf1 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f330e8b6f035409aa4a3484cc2903bf1 2024-11-21T00:27:37,218 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b33ff699c02449809ff276a8f42c9c34 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b33ff699c02449809ff276a8f42c9c34 2024-11-21T00:27:37,220 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ccd790e1199b484aac8222340212e615 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ccd790e1199b484aac8222340212e615 2024-11-21T00:27:37,224 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/542a308a694440f2aa2648e664e44372 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/542a308a694440f2aa2648e664e44372 2024-11-21T00:27:37,225 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3fa7683d74db4663b040a5ebe0d94837 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/3fa7683d74db4663b040a5ebe0d94837 2024-11-21T00:27:37,227 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/984fd39907744b2d8822d60d1b94c43e to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/984fd39907744b2d8822d60d1b94c43e 2024-11-21T00:27:37,229 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b785fc8496d54b7dbeec8d7ac70c8156 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/b785fc8496d54b7dbeec8d7ac70c8156 2024-11-21T00:27:37,231 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/4284908554ef43e890f662a1ea09e969 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/4284908554ef43e890f662a1ea09e969 2024-11-21T00:27:37,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/9340bd109ebc4623996114de60b4bd4e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/9340bd109ebc4623996114de60b4bd4e 2024-11-21T00:27:37,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f09bc1262b5b4c13b8a562d2feedbb33 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/f09bc1262b5b4c13b8a562d2feedbb33 2024-11-21T00:27:37,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e77664447a68400eb3083869101841c3 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e77664447a68400eb3083869101841c3 2024-11-21T00:27:37,239 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/15e5c95b4b234ac2aaab33c1f298b575 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/15e5c95b4b234ac2aaab33c1f298b575 2024-11-21T00:27:37,240 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ba30f2a9c0754f37b02a09717eddce04 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ba30f2a9c0754f37b02a09717eddce04 2024-11-21T00:27:37,243 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0ef34c46c1d84f829ba7a657545989d2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0ef34c46c1d84f829ba7a657545989d2 2024-11-21T00:27:37,245 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ab0a61515da64051a3f9c11a55b05ee8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ab0a61515da64051a3f9c11a55b05ee8 2024-11-21T00:27:37,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0c1ee5ac46b045ffa11873ba8f946ebf to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0c1ee5ac46b045ffa11873ba8f946ebf 2024-11-21T00:27:37,248 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e08678c5f03044fb863187bee77b0f09 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/e08678c5f03044fb863187bee77b0f09 2024-11-21T00:27:37,249 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/7407f46aac5943249105e328e3cf4022 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/7407f46aac5943249105e328e3cf4022 2024-11-21T00:27:37,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c9b4babb847743af859b5adecff256ec to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/c9b4babb847743af859b5adecff256ec 2024-11-21T00:27:37,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6b3621bfade34708ba52a0db2a21da78 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/6b3621bfade34708ba52a0db2a21da78 2024-11-21T00:27:37,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/27c87bafdaf84cc69f876251094704e0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/27c87bafdaf84cc69f876251094704e0 2024-11-21T00:27:37,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/a087b4c0ac334ad88e31970318a0affe to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/a087b4c0ac334ad88e31970318a0affe 2024-11-21T00:27:37,259 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/918b2585e6084b9391be2b84c6b9ab76 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/918b2585e6084b9391be2b84c6b9ab76 2024-11-21T00:27:37,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1afe92935ac64434972bb849dd069d24 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1afe92935ac64434972bb849dd069d24 2024-11-21T00:27:37,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1bfbe9df66c943ed910cf944bba11337 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/1bfbe9df66c943ed910cf944bba11337 2024-11-21T00:27:37,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/8cabd00cea8d474581c8588606f6a7c8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/8cabd00cea8d474581c8588606f6a7c8 2024-11-21T00:27:37,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/2d3aa257801a4b3c90f0b6af5170eef6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/2d3aa257801a4b3c90f0b6af5170eef6 2024-11-21T00:27:37,268 DEBUG [StoreCloser-TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/5e69fcddff79427a92b938295c198384 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/5e69fcddff79427a92b938295c198384 2024-11-21T00:27:37,275 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/recovered.edits/480.seqid, newMaxSeqId=480, maxSeqId=1 2024-11-21T00:27:37,278 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7. 
2024-11-21T00:27:37,278 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1635): Region close journal for 6103bc2a66018bd699c0a8ab668a67b7: 2024-11-21T00:27:37,280 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(170): Closed 6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:37,280 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=6103bc2a66018bd699c0a8ab668a67b7, regionState=CLOSED 2024-11-21T00:27:37,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-21T00:27:37,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseRegionProcedure 6103bc2a66018bd699c0a8ab668a67b7, server=0e7930017ff8,37961,1732148819586 in 1.3790 sec 2024-11-21T00:27:37,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-11-21T00:27:37,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6103bc2a66018bd699c0a8ab668a67b7, UNASSIGN in 1.3950 sec 2024-11-21T00:27:37,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-21T00:27:37,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4080 sec 2024-11-21T00:27:37,295 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148857295"}]},"ts":"1732148857295"} 2024-11-21T00:27:37,296 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-21T00:27:37,307 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-21T00:27:37,309 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4530 sec 2024-11-21T00:27:37,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-21T00:27:37,982 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-21T00:27:37,985 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-21T00:27:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:27:37,991 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:27:37,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-21T00:27:37,993 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:27:37,997 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:38,001 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/recovered.edits] 2024-11-21T00:27:38,005 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1a27ade47a314bd2ad041ff367fed18e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/1a27ade47a314bd2ad041ff367fed18e 2024-11-21T00:27:38,007 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/a220035917c54191b1830b7b698d678d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/A/a220035917c54191b1830b7b698d678d 2024-11-21T00:27:38,010 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/7d52724f10c345ccac54d918f6ec84d9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/7d52724f10c345ccac54d918f6ec84d9 2024-11-21T00:27:38,012 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/8579eb26937a48dba25e4a2467bd23ba to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/B/8579eb26937a48dba25e4a2467bd23ba 2024-11-21T00:27:38,015 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0f557e25410c4722becc009e36b9eac3 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/0f557e25410c4722becc009e36b9eac3 2024-11-21T00:27:38,016 
DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ab16578967c84e339cf0453be59ca4de to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/C/ab16578967c84e339cf0453be59ca4de 2024-11-21T00:27:38,020 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/recovered.edits/480.seqid to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7/recovered.edits/480.seqid 2024-11-21T00:27:38,021 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/6103bc2a66018bd699c0a8ab668a67b7 2024-11-21T00:27:38,021 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-21T00:27:38,027 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:27:38,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-21T00:27:38,034 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-21T00:27:38,085 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-21T00:27:38,088 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:27:38,089 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-21T00:27:38,089 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732148858089"}]},"ts":"9223372036854775807"} 2024-11-21T00:27:38,095 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-21T00:27:38,095 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6103bc2a66018bd699c0a8ab668a67b7, NAME => 'TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7.', STARTKEY => '', ENDKEY => ''}] 2024-11-21T00:27:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-21T00:27:38,095 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-21T00:27:38,096 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732148858095"}]},"ts":"9223372036854775807"} 2024-11-21T00:27:38,106 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-21T00:27:38,150 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:27:38,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 164 msec 2024-11-21T00:27:38,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-21T00:27:38,297 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-21T00:27:38,309 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: hconnection-0x14e842c7-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x14e842c7-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;0e7930017ff8:37961-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x14e842c7-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1912749785_22 at /127.0.0.1:39864 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x14e842c7-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=457 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=596 (was 542) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3227 (was 3934) 2024-11-21T00:27:38,320 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=596, ProcessCount=11, AvailableMemoryMB=3227 2024-11-21T00:27:38,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-21T00:27:38,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:38,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-21T00:27:38,326 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:27:38,326 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:38,326 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 37 2024-11-21T00:27:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-21T00:27:38,328 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:27:38,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741944_1120 (size=963) 2024-11-21T00:27:38,429 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-21T00:27:38,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-21T00:27:38,756 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f 2024-11-21T00:27:38,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741945_1121 (size=53) 2024-11-21T00:27:38,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-21T00:27:38,968 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-21T00:27:39,189 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:39,189 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 542dd37a0a64b62316f21779fd913b59, disabling compactions & flushes 2024-11-21T00:27:39,189 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:39,189 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:39,189 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
after waiting 0 ms 2024-11-21T00:27:39,189 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:39,189 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:39,189 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:39,190 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:27:39,190 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732148859190"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148859190"}]},"ts":"1732148859190"} 2024-11-21T00:27:39,192 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-21T00:27:39,193 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:27:39,193 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148859193"}]},"ts":"1732148859193"} 2024-11-21T00:27:39,195 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-21T00:27:39,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, ASSIGN}] 2024-11-21T00:27:39,213 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, ASSIGN 2024-11-21T00:27:39,214 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, ASSIGN; state=OFFLINE, location=0e7930017ff8,37961,1732148819586; forceNewPlan=false, retain=false 2024-11-21T00:27:39,365 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=542dd37a0a64b62316f21779fd913b59, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:39,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; OpenRegionProcedure 542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:27:39,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-21T00:27:39,517 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin 
connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:39,520 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:39,520 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7285): Opening region: {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:39,521 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:39,521 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:39,521 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7327): checking encryption for 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:39,521 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7330): checking classloading for 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:39,523 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:39,524 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:27:39,524 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 542dd37a0a64b62316f21779fd913b59 columnFamilyName A 2024-11-21T00:27:39,524 DEBUG [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:39,525 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(327): Store=542dd37a0a64b62316f21779fd913b59/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-11-21T00:27:39,525 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:39,526 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:27:39,527 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 542dd37a0a64b62316f21779fd913b59 columnFamilyName B 2024-11-21T00:27:39,527 DEBUG [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:39,528 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(327): Store=542dd37a0a64b62316f21779fd913b59/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:39,528 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:39,529 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:27:39,529 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 542dd37a0a64b62316f21779fd913b59 columnFamilyName C 2024-11-21T00:27:39,529 DEBUG [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:39,530 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(327): Store=542dd37a0a64b62316f21779fd913b59/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:39,530 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:39,530 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:39,531 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:39,533 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T00:27:39,535 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1085): writing seq id for 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:39,536 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:39,537 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1102): Opened 542dd37a0a64b62316f21779fd913b59; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66316500, jitterRate=-0.011807143688201904}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:27:39,538 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1001): Region open journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:39,538 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., pid=39, masterSystemTime=1732148859517 2024-11-21T00:27:39,540 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:39,540 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:39,540 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=542dd37a0a64b62316f21779fd913b59, regionState=OPEN, openSeqNum=2, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:39,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-21T00:27:39,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; OpenRegionProcedure 542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 in 176 msec 2024-11-21T00:27:39,549 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-11-21T00:27:39,549 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, ASSIGN in 335 msec 2024-11-21T00:27:39,550 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:27:39,551 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148859550"}]},"ts":"1732148859550"} 2024-11-21T00:27:39,552 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-21T00:27:39,563 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:27:39,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2390 sec 2024-11-21T00:27:40,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-21T00:27:40,434 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-11-21T00:27:40,436 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3242ee55 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d3b0c59 2024-11-21T00:27:40,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a1a4a6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:40,493 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:40,495 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:40,497 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:40,499 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56696, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:40,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-21T00:27:40,505 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:40,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-21T00:27:40,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741946_1122 (size=999) 2024-11-21T00:27:40,931 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-21T00:27:40,931 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-21T00:27:40,935 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-21T00:27:40,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, REOPEN/MOVE}] 2024-11-21T00:27:40,946 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, REOPEN/MOVE 2024-11-21T00:27:40,947 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=542dd37a0a64b62316f21779fd913b59, regionState=CLOSING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:40,952 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T00:27:40,952 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; CloseRegionProcedure 542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:27:41,104 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,105 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(124): Close 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,105 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-21T00:27:41,105 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1681): Closing 542dd37a0a64b62316f21779fd913b59, disabling compactions & flushes 2024-11-21T00:27:41,105 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:41,105 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:41,105 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. after waiting 0 ms 2024-11-21T00:27:41,105 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:41,108 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-21T00:27:41,109 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:41,109 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1635): Region close journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:41,109 WARN [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionServer(3786): Not adding moved region record: 542dd37a0a64b62316f21779fd913b59 to self. 2024-11-21T00:27:41,111 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(170): Closed 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,111 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=542dd37a0a64b62316f21779fd913b59, regionState=CLOSED 2024-11-21T00:27:41,113 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-21T00:27:41,113 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; CloseRegionProcedure 542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 in 160 msec 2024-11-21T00:27:41,114 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, REOPEN/MOVE; state=CLOSED, location=0e7930017ff8,37961,1732148819586; forceNewPlan=false, retain=true 2024-11-21T00:27:41,264 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=542dd37a0a64b62316f21779fd913b59, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure 542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:27:41,417 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,422 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:41,423 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:41,423 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,423 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:41,423 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,423 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,426 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,427 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:27:41,434 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 542dd37a0a64b62316f21779fd913b59 columnFamilyName A 2024-11-21T00:27:41,436 DEBUG [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:41,437 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(327): Store=542dd37a0a64b62316f21779fd913b59/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:41,438 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,438 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:27:41,439 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 542dd37a0a64b62316f21779fd913b59 columnFamilyName B 2024-11-21T00:27:41,439 DEBUG [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:41,439 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(327): Store=542dd37a0a64b62316f21779fd913b59/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:41,440 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,440 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:27:41,441 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 542dd37a0a64b62316f21779fd913b59 columnFamilyName C 2024-11-21T00:27:41,441 DEBUG [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:41,441 INFO [StoreOpener-542dd37a0a64b62316f21779fd913b59-1 {}] regionserver.HStore(327): Store=542dd37a0a64b62316f21779fd913b59/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:41,441 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:41,442 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,444 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,446 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T00:27:41,449 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,451 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 542dd37a0a64b62316f21779fd913b59; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72041118, jitterRate=0.07349631190299988}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:27:41,452 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:41,453 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., pid=44, masterSystemTime=1732148861417 2024-11-21T00:27:41,455 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:41,456 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
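The records above show the three column families (A, B, C) of region 542dd37a0a64b62316f21779fd913b59 being opened with CompactingMemStore (in-memory compaction ADAPTIVE) and FlushLargeStoresPolicy falling back to memStoreFlushSize / #families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the TestAcidGuarantees descriptor. As a rough illustration only (not the test's actual setup), a table of that shape could be declared through the Java client roughly like this; the 16 MB value mirrors the fallback reported in the log, everything else is assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Key named in the FlushLargeStoresPolicy record; 16 MB mirrors the
                  // fallback the log reports, it is not the test's real setting.
                  .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216");
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    // Matches "compactor=ADAPTIVE" in the CompactingMemStore records.
                    .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }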
2024-11-21T00:27:41,456 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=542dd37a0a64b62316f21779fd913b59, regionState=OPEN, openSeqNum=5, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-11-21T00:27:41,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure 542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 in 192 msec 2024-11-21T00:27:41,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-21T00:27:41,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, REOPEN/MOVE in 513 msec 2024-11-21T00:27:41,465 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-21T00:27:41,465 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 528 msec 2024-11-21T00:27:41,470 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 959 msec 2024-11-21T00:27:41,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-21T00:27:41,479 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2aa409d0 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@515fd839 2024-11-21T00:27:41,518 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d006bed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:41,520 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53af6163 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@748ab582 2024-11-21T00:27:41,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f4859f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:41,535 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x15736fcc to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@35b51e5d 2024-11-21T00:27:41,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1eb823f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:41,552 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32168855 to 
127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@74be9bc0 2024-11-21T00:27:41,567 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a0312cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:41,569 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40832d66 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@776c0cb7 2024-11-21T00:27:41,579 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@555bfdff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:41,582 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ec46f90 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@347ad9b2 2024-11-21T00:27:41,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d5e0e3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:41,593 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7f63b68c to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d36579b 2024-11-21T00:27:41,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70f48df4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:41,639 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x473f181f to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@681a05ec 2024-11-21T00:27:41,666 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cd5be36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:41,669 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x768577a2 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e0829fb 2024-11-21T00:27:41,692 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fbd1a02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:41,703 DEBUG 
[hconnection-0x272ddab2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:41,704 DEBUG [hconnection-0x3dd60a43-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:41,706 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:41,706 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47714, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:41,707 DEBUG [hconnection-0x7381168c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:41,707 DEBUG [hconnection-0x4289d2cd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:41,708 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47722, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:41,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-21T00:27:41,710 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47732, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:41,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-21T00:27:41,710 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:41,716 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:41,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:41,719 DEBUG [hconnection-0x610d282c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:41,720 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:41,727 DEBUG [hconnection-0x5d8fb141-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:41,729 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:41,731 DEBUG [hconnection-0x24793937-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-21T00:27:41,732 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47762, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:41,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:27:41,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:41,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:41,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:41,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:41,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:41,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:41,742 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47778, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:41,743 DEBUG [hconnection-0x4ca3fb43-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:41,745 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:41,759 DEBUG [hconnection-0x401ccf74-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:41,772 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47800, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:41,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:41,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:41,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148921792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148921790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:41,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148921795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:41,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148921795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:41,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148921800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-21T00:27:41,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121fd9fa88565cf4a75a4fed6bf6feb309d_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148861726/Put/seqid=0 2024-11-21T00:27:41,869 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,869 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-21T00:27:41,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741947_1123 (size=12154) 2024-11-21T00:27:41,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:41,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
as already flushing 2024-11-21T00:27:41,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:41,885 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:41,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:41,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:41,886 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:41,897 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121fd9fa88565cf4a75a4fed6bf6feb309d_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121fd9fa88565cf4a75a4fed6bf6feb309d_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:41,898 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/55a444baf680485482422cb0c1de086f, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:41,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:41,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148921904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:41,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148921904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:41,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148921904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/55a444baf680485482422cb0c1de086f is 175, key is test_row_0/A:col10/1732148861726/Put/seqid=0 2024-11-21T00:27:41,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:41,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148921905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:41,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148921906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:41,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741948_1124 (size=30955) 2024-11-21T00:27:41,954 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/55a444baf680485482422cb0c1de086f 2024-11-21T00:27:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-21T00:27:42,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/97d964d4b6444daba3de2c5a61e9376d is 50, key is test_row_0/B:col10/1732148861726/Put/seqid=0 2024-11-21T00:27:42,037 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-21T00:27:42,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:42,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:42,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
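At this point the log shows a client-requested flush ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") being turned into FlushTableProcedure pid=45 and its FlushRegionProcedure subprocedure pid=46, which repeatedly reports "NOT flushing ... as already flushing" because the MemStoreFlusher thread is already writing the A/B/C stores out. A minimal sketch of issuing the same administrative flush from the Java client, assuming a reachable cluster and default configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAcidTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Client-side equivalent of the "flush TestAcidGuarantees" request the master
          // turns into FlushTableProcedure (pid=45) above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }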
2024-11-21T00:27:42,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:42,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:42,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:42,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741949_1125 (size=12001) 2024-11-21T00:27:42,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/97d964d4b6444daba3de2c5a61e9376d 2024-11-21T00:27:42,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148922116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148922116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148922116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148922120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148922126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/844831a09bbd4e238017df022eba1e93 is 50, key is test_row_0/C:col10/1732148861726/Put/seqid=0 2024-11-21T00:27:42,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741950_1126 (size=12001) 2024-11-21T00:27:42,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/844831a09bbd4e238017df022eba1e93 2024-11-21T00:27:42,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/55a444baf680485482422cb0c1de086f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/55a444baf680485482422cb0c1de086f 2024-11-21T00:27:42,198 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-21T00:27:42,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 
{event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:42,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:42,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:42,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:42,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:42,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:42,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/55a444baf680485482422cb0c1de086f, entries=150, sequenceid=16, filesize=30.2 K 2024-11-21T00:27:42,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/97d964d4b6444daba3de2c5a61e9376d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/97d964d4b6444daba3de2c5a61e9376d 2024-11-21T00:27:42,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/97d964d4b6444daba3de2c5a61e9376d, entries=150, sequenceid=16, filesize=11.7 K 2024-11-21T00:27:42,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/844831a09bbd4e238017df022eba1e93 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/844831a09bbd4e238017df022eba1e93 2024-11-21T00:27:42,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/844831a09bbd4e238017df022eba1e93, entries=150, sequenceid=16, filesize=11.7 K 2024-11-21T00:27:42,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 542dd37a0a64b62316f21779fd913b59 in 491ms, sequenceid=16, compaction requested=false 2024-11-21T00:27:42,224 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-21T00:27:42,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:42,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-21T00:27:42,359 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-21T00:27:42,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:42,360 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-21T00:27:42,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:42,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:42,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:42,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:42,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:42,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:42,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ca30f32c187e468fb793a8ea426a7c0e_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148861793/Put/seqid=0 2024-11-21T00:27:42,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:42,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148922441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148922446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148922447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148922452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148922452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741951_1127 (size=12154) 2024-11-21T00:27:42,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:42,490 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ca30f32c187e468fb793a8ea426a7c0e_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ca30f32c187e468fb793a8ea426a7c0e_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:42,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/2590078960ad43fa9121715b9b0b3d6a, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:42,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/2590078960ad43fa9121715b9b0b3d6a is 175, key is test_row_0/A:col10/1732148861793/Put/seqid=0 2024-11-21T00:27:42,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148922557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148922558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148922558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741952_1128 (size=30955) 2024-11-21T00:27:42,565 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/2590078960ad43fa9121715b9b0b3d6a 2024-11-21T00:27:42,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148922567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148922569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/b0a4163b98cd4e39a313a2c66967ad0e is 50, key is test_row_0/B:col10/1732148861793/Put/seqid=0 2024-11-21T00:27:42,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741953_1129 (size=12001) 2024-11-21T00:27:42,690 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/b0a4163b98cd4e39a313a2c66967ad0e 2024-11-21T00:27:42,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/9d97f630096345bcb0061f6a4f460eaf is 50, key is test_row_0/C:col10/1732148861793/Put/seqid=0 2024-11-21T00:27:42,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148922760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148922767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148922767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148922776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:42,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148922795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:42,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741954_1130 (size=12001) 2024-11-21T00:27:42,801 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/9d97f630096345bcb0061f6a4f460eaf 2024-11-21T00:27:42,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/2590078960ad43fa9121715b9b0b3d6a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2590078960ad43fa9121715b9b0b3d6a 2024-11-21T00:27:42,820 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2590078960ad43fa9121715b9b0b3d6a, entries=150, sequenceid=42, filesize=30.2 K 2024-11-21T00:27:42,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-21T00:27:42,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/b0a4163b98cd4e39a313a2c66967ad0e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/b0a4163b98cd4e39a313a2c66967ad0e 2024-11-21T00:27:42,832 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/b0a4163b98cd4e39a313a2c66967ad0e, entries=150, sequenceid=42, filesize=11.7 K 2024-11-21T00:27:42,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/9d97f630096345bcb0061f6a4f460eaf as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/9d97f630096345bcb0061f6a4f460eaf 2024-11-21T00:27:42,850 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/9d97f630096345bcb0061f6a4f460eaf, entries=150, sequenceid=42, filesize=11.7 K 2024-11-21T00:27:42,854 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 542dd37a0a64b62316f21779fd913b59 in 494ms, sequenceid=42, compaction requested=false 2024-11-21T00:27:42,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:42,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:42,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-21T00:27:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-21T00:27:42,859 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-21T00:27:42,859 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1400 sec 2024-11-21T00:27:42,861 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.1540 sec 2024-11-21T00:27:42,885 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-21T00:27:43,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:27:43,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:43,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:43,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:43,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:43,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:43,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:43,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:43,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112111a251505cae418e92fc98818cb5183e_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148862429/Put/seqid=0 2024-11-21T00:27:43,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148923152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148923155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148923157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148923151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148923163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741955_1131 (size=14594) 2024-11-21T00:27:43,224 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:43,243 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112111a251505cae418e92fc98818cb5183e_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112111a251505cae418e92fc98818cb5183e_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:43,251 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/50f1a37e61114c1a9fe30888508c4274, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:43,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/50f1a37e61114c1a9fe30888508c4274 is 175, key is test_row_0/A:col10/1732148862429/Put/seqid=0 2024-11-21T00:27:43,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148923272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148923287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148923294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148923304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148923304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741956_1132 (size=39549) 2024-11-21T00:27:43,332 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/50f1a37e61114c1a9fe30888508c4274 2024-11-21T00:27:43,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/91ffe764c8304f7fae542f8e7abfc295 is 50, key is test_row_0/B:col10/1732148862429/Put/seqid=0 2024-11-21T00:27:43,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148923483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148923494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148923511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741957_1133 (size=12001) 2024-11-21T00:27:43,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148923511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148923516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148923807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148923807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-21T00:27:43,825 INFO [Thread-621 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-21T00:27:43,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148923827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148923832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:43,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-21T00:27:43,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:43,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148923835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,838 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:43,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-21T00:27:43,839 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:43,839 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:43,926 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/91ffe764c8304f7fae542f8e7abfc295 2024-11-21T00:27:43,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-21T00:27:43,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/4b6c1bca14274600b7cba7daea0bbc27 is 50, key is test_row_0/C:col10/1732148862429/Put/seqid=0 2024-11-21T00:27:43,992 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:43,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-21T00:27:43,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:43,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:43,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:43,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:43,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:43,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:44,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741958_1134 (size=12001) 2024-11-21T00:27:44,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/4b6c1bca14274600b7cba7daea0bbc27 2024-11-21T00:27:44,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/50f1a37e61114c1a9fe30888508c4274 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/50f1a37e61114c1a9fe30888508c4274 2024-11-21T00:27:44,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/50f1a37e61114c1a9fe30888508c4274, entries=200, sequenceid=54, filesize=38.6 K 2024-11-21T00:27:44,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/91ffe764c8304f7fae542f8e7abfc295 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/91ffe764c8304f7fae542f8e7abfc295 2024-11-21T00:27:44,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/91ffe764c8304f7fae542f8e7abfc295, entries=150, sequenceid=54, filesize=11.7 K 2024-11-21T00:27:44,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/4b6c1bca14274600b7cba7daea0bbc27 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4b6c1bca14274600b7cba7daea0bbc27 2024-11-21T00:27:44,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4b6c1bca14274600b7cba7daea0bbc27, entries=150, sequenceid=54, filesize=11.7 K 2024-11-21T00:27:44,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 542dd37a0a64b62316f21779fd913b59 in 1059ms, sequenceid=54, compaction requested=true 2024-11-21T00:27:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-21T00:27:44,131 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:44,131 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:44,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:44,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:44,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-21T00:27:44,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-21T00:27:44,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:44,148 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:27:44,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:44,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:44,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:44,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:44,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:44,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:44,150 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:44,150 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:44,150 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/A is initiating minor compaction (all files) 2024-11-21T00:27:44,151 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/A in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:44,151 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/55a444baf680485482422cb0c1de086f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2590078960ad43fa9121715b9b0b3d6a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/50f1a37e61114c1a9fe30888508c4274] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=99.1 K 2024-11-21T00:27:44,151 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:44,151 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/55a444baf680485482422cb0c1de086f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2590078960ad43fa9121715b9b0b3d6a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/50f1a37e61114c1a9fe30888508c4274] 2024-11-21T00:27:44,150 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/B is initiating minor compaction (all files) 2024-11-21T00:27:44,151 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/B in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:44,151 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/97d964d4b6444daba3de2c5a61e9376d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/b0a4163b98cd4e39a313a2c66967ad0e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/91ffe764c8304f7fae542f8e7abfc295] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=35.2 K 2024-11-21T00:27:44,152 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55a444baf680485482422cb0c1de086f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732148861726 2024-11-21T00:27:44,152 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 97d964d4b6444daba3de2c5a61e9376d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732148861726 2024-11-21T00:27:44,152 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2590078960ad43fa9121715b9b0b3d6a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732148861787 2024-11-21T00:27:44,152 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b0a4163b98cd4e39a313a2c66967ad0e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732148861787 2024-11-21T00:27:44,153 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 91ffe764c8304f7fae542f8e7abfc295, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732148862429 2024-11-21T00:27:44,154 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50f1a37e61114c1a9fe30888508c4274, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732148862429 2024-11-21T00:27:44,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ec3ef1557d254475b19b59e6a137fef6_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148863140/Put/seqid=0 2024-11-21T00:27:44,205 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#B#compaction#115 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:44,206 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/663bf093484c43fc84b0b499a7f083ad is 50, key is test_row_0/B:col10/1732148862429/Put/seqid=0 2024-11-21T00:27:44,228 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:44,263 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411213339b1096faf42769a86e43d94bf300c_542dd37a0a64b62316f21779fd913b59 store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:44,269 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411213339b1096faf42769a86e43d94bf300c_542dd37a0a64b62316f21779fd913b59, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:44,269 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411213339b1096faf42769a86e43d94bf300c_542dd37a0a64b62316f21779fd913b59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:44,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741959_1135 (size=12154) 2024-11-21T00:27:44,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:44,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:44,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:44,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148924340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148924344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148924344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148924344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,385 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ec3ef1557d254475b19b59e6a137fef6_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ec3ef1557d254475b19b59e6a137fef6_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:44,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741960_1136 (size=12104) 2024-11-21T00:27:44,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/99c13de71d444780b42b7223c7944b0d, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:44,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/99c13de71d444780b42b7223c7944b0d is 175, key is test_row_0/A:col10/1732148863140/Put/seqid=0 2024-11-21T00:27:44,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741961_1137 (size=4469) 2024-11-21T00:27:44,407 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#A#compaction#116 average throughput is 0.14 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:44,409 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/498f1a6a418c4fef894b7e74f70419c8 is 175, key is test_row_0/A:col10/1732148862429/Put/seqid=0 2024-11-21T00:27:44,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148924360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-21T00:27:44,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148924447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741963_1139 (size=31058) 2024-11-21T00:27:44,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741962_1138 (size=30955) 2024-11-21T00:27:44,496 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/99c13de71d444780b42b7223c7944b0d 2024-11-21T00:27:44,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148924507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/283a4789e80a411ba38a055192cc637c is 50, key is test_row_0/B:col10/1732148863140/Put/seqid=0 2024-11-21T00:27:44,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148924539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741964_1140 (size=12001) 2024-11-21T00:27:44,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148924724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148924723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:44,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148924747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:44,800 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/663bf093484c43fc84b0b499a7f083ad as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/663bf093484c43fc84b0b499a7f083ad 2024-11-21T00:27:44,841 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/B of 542dd37a0a64b62316f21779fd913b59 into 663bf093484c43fc84b0b499a7f083ad(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
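The entry above shows the B-family minor compaction completing: three ~11.7 K store files are rewritten into a single file 663bf093484c43fc84b0b499a7f083ad of 11.8 K. For reference, a minimal client-side sketch of requesting a compaction and polling until the region server reports it finished, using the standard Admin API; the table name is taken from the log, and connection details and error handling are omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactAndWait {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.compact(table); // ask the region servers to run a (minor) compaction
            // Poll until no compaction is reported for the table any more.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1000L);
            }
        }
    }
}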
2024-11-21T00:27:44,841 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:44,841 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/B, priority=13, startTime=1732148864131; duration=0sec 2024-11-21T00:27:44,841 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:44,841 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:B 2024-11-21T00:27:44,841 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:44,867 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:44,867 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/C is initiating minor compaction (all files) 2024-11-21T00:27:44,867 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/C in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:44,868 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/844831a09bbd4e238017df022eba1e93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/9d97f630096345bcb0061f6a4f460eaf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4b6c1bca14274600b7cba7daea0bbc27] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=35.2 K 2024-11-21T00:27:44,871 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 844831a09bbd4e238017df022eba1e93, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732148861726 2024-11-21T00:27:44,872 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d97f630096345bcb0061f6a4f460eaf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732148861787 2024-11-21T00:27:44,874 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b6c1bca14274600b7cba7daea0bbc27, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732148862429 2024-11-21T00:27:44,899 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
542dd37a0a64b62316f21779fd913b59#C#compaction#118 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:44,899 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/c33db9fcc1654be2a6cc92cd226b2e8f is 50, key is test_row_0/C:col10/1732148862429/Put/seqid=0 2024-11-21T00:27:44,902 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/498f1a6a418c4fef894b7e74f70419c8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/498f1a6a418c4fef894b7e74f70419c8 2024-11-21T00:27:44,924 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/A of 542dd37a0a64b62316f21779fd913b59 into 498f1a6a418c4fef894b7e74f70419c8(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:44,925 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:44,925 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/A, priority=13, startTime=1732148864131; duration=0sec 2024-11-21T00:27:44,925 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:44,925 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:A 2024-11-21T00:27:44,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-21T00:27:44,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741965_1141 (size=12104) 2024-11-21T00:27:44,971 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/283a4789e80a411ba38a055192cc637c 2024-11-21T00:27:44,971 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/c33db9fcc1654be2a6cc92cd226b2e8f as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/c33db9fcc1654be2a6cc92cd226b2e8f 2024-11-21T00:27:44,982 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/C of 542dd37a0a64b62316f21779fd913b59 into c33db9fcc1654be2a6cc92cd226b2e8f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:44,983 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:44,983 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/C, priority=13, startTime=1732148864132; duration=0sec 2024-11-21T00:27:44,983 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:44,983 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:C 2024-11-21T00:27:45,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/1d7922d5c1144555b618fbc757e0235b is 50, key is test_row_0/C:col10/1732148863140/Put/seqid=0 2024-11-21T00:27:45,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741966_1142 (size=12001) 2024-11-21T00:27:45,032 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/1d7922d5c1144555b618fbc757e0235b 2024-11-21T00:27:45,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/99c13de71d444780b42b7223c7944b0d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/99c13de71d444780b42b7223c7944b0d 2024-11-21T00:27:45,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148925039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,052 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/99c13de71d444780b42b7223c7944b0d, entries=150, sequenceid=79, filesize=30.2 K 2024-11-21T00:27:45,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/283a4789e80a411ba38a055192cc637c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/283a4789e80a411ba38a055192cc637c 2024-11-21T00:27:45,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148925051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148925076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,095 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/283a4789e80a411ba38a055192cc637c, entries=150, sequenceid=79, filesize=11.7 K 2024-11-21T00:27:45,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/1d7922d5c1144555b618fbc757e0235b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/1d7922d5c1144555b618fbc757e0235b 2024-11-21T00:27:45,104 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/1d7922d5c1144555b618fbc757e0235b, entries=150, sequenceid=79, filesize=11.7 K 2024-11-21T00:27:45,105 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 542dd37a0a64b62316f21779fd913b59 in 957ms, sequenceid=79, compaction requested=false 2024-11-21T00:27:45,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:45,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
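The flush above completes after 957 ms while the RPC handler threads keep rejecting writes with RegionTooBusyException because the region is over its 512.0 K memstore limit. A minimal sketch of the backoff-and-retry behaviour such a writer performs follows; the row, family and qualifier are taken from the log, the value is a placeholder, and in the real synchronous client these retries happen internally (the exception may surface wrapped in RetriesExhaustedWithDetailsException rather than being caught directly as shown here).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100L;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore blocking limit; wait and try again.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5000L);
                }
            }
        }
    }
}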
2024-11-21T00:27:45,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-21T00:27:45,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-21T00:27:45,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-21T00:27:45,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2700 sec 2024-11-21T00:27:45,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.2770 sec 2024-11-21T00:27:45,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:27:45,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:45,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:45,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:45,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:45,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:45,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:45,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:45,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411211a0705256d30436f8bf40f927b33ef83_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148864341/Put/seqid=0 2024-11-21T00:27:45,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741967_1143 (size=14594) 2024-11-21T00:27:45,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148925586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148925588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148925592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148925590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148925593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148925698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148925698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148925702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148925700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148925703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148925907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148925909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,923 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:45,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148925917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,932 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411211a0705256d30436f8bf40f927b33ef83_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411211a0705256d30436f8bf40f927b33ef83_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:45,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148925923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:45,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148925927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:45,937 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/a98a8a9a74044679b68709371f8dbc04, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:45,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/a98a8a9a74044679b68709371f8dbc04 is 175, key is test_row_0/A:col10/1732148864341/Put/seqid=0 2024-11-21T00:27:45,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-21T00:27:45,944 INFO [Thread-621 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-21T00:27:45,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:45,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-21T00:27:45,948 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:45,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-21T00:27:45,950 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:45,950 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:45,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741968_1144 
(size=39549) 2024-11-21T00:27:46,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-21T00:27:46,103 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-21T00:27:46,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:46,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:46,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:46,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:46,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148926219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,226 INFO [master/0e7930017ff8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-21T00:27:46,226 INFO [master/0e7930017ff8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-21T00:27:46,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148926228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148926229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148926234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148926237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-21T00:27:46,259 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-21T00:27:46,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:46,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
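The repeated RegionTooBusyException entries above are the region server refusing Mutate calls while the region's memstore is over its blocking limit (here 512.0 K); callers are expected to back off and retry until a flush frees space. Below is a minimal, hypothetical client-side sketch of that retry pattern, not part of the test itself: the table name, row, family and qualifier are taken from the log, the backoff schedule is an assumption, and in practice the HBase client may surface the failure wrapped in its own retries-exhausted exception rather than as a bare RegionTooBusyException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the keys visible in the flush entries above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                    // assumed backoff schedule
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                      // rejected while memstore > blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // Server answered "Over memstore limit"; wait for the flush to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}

The test's own writers behave similarly, which is why the same callIds reappear with later deadlines a few hundred milliseconds apart in the DEBUG lines above.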
2024-11-21T00:27:46,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:46,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:46,402 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/a98a8a9a74044679b68709371f8dbc04 2024-11-21T00:27:46,416 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-21T00:27:46,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:46,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,417 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:46,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:46,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:46,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/031d306a1ac441d182387f61a7da11b8 is 50, key is test_row_0/B:col10/1732148864341/Put/seqid=0 2024-11-21T00:27:46,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741969_1145 (size=12001) 2024-11-21T00:27:46,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/031d306a1ac441d182387f61a7da11b8 2024-11-21T00:27:46,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/426b7bd15e2743c0a49d476680be8417 is 50, key is test_row_0/C:col10/1732148864341/Put/seqid=0 2024-11-21T00:27:46,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741970_1146 (size=12001) 2024-11-21T00:27:46,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/426b7bd15e2743c0a49d476680be8417 2024-11-21T00:27:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-21T00:27:46,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/a98a8a9a74044679b68709371f8dbc04 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/a98a8a9a74044679b68709371f8dbc04 2024-11-21T00:27:46,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/a98a8a9a74044679b68709371f8dbc04, entries=200, sequenceid=94, filesize=38.6 K 2024-11-21T00:27:46,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/031d306a1ac441d182387f61a7da11b8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/031d306a1ac441d182387f61a7da11b8 2024-11-21T00:27:46,569 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-21T00:27:46,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:46,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:46,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
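The FlushTableProcedure (pid=49) and its FlushRegionProcedure child (pid=50) in the entries above come from a client-requested flush; each remote attempt fails with "Unable to complete flush ... as already flushing" because the region is still busy with the earlier flush, so the master keeps re-dispatching the callable until the region is free. A minimal sketch of issuing such a flush from a client, assuming a reachable cluster and using only the standard Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush all regions of the table; on the master this
      // becomes a FlushTableProcedure with one FlushRegionProcedure per region,
      // as in the pid=49 / pid=50 entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The call returns once the procedure completes, which is why the log shows "Operation: FLUSH ... procId: 47 completed" only after the server-side work has finished.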
2024-11-21T00:27:46,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/031d306a1ac441d182387f61a7da11b8, entries=150, sequenceid=94, filesize=11.7 K 2024-11-21T00:27:46,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/426b7bd15e2743c0a49d476680be8417 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/426b7bd15e2743c0a49d476680be8417 2024-11-21T00:27:46,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:46,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/426b7bd15e2743c0a49d476680be8417, entries=150, sequenceid=94, filesize=11.7 K 2024-11-21T00:27:46,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 542dd37a0a64b62316f21779fd913b59 in 1156ms, sequenceid=94, compaction requested=true 2024-11-21T00:27:46,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:46,592 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:46,594 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:46,594 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/A is initiating minor compaction (all files) 2024-11-21T00:27:46,594 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/A in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,594 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/498f1a6a418c4fef894b7e74f70419c8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/99c13de71d444780b42b7223c7944b0d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/a98a8a9a74044679b68709371f8dbc04] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=99.2 K 2024-11-21T00:27:46,594 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:46,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:46,595 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/498f1a6a418c4fef894b7e74f70419c8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/99c13de71d444780b42b7223c7944b0d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/a98a8a9a74044679b68709371f8dbc04] 2024-11-21T00:27:46,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:46,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:46,595 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:46,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:46,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:46,597 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 498f1a6a418c4fef894b7e74f70419c8, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732148862429 2024-11-21T00:27:46,598 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99c13de71d444780b42b7223c7944b0d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732148863140 2024-11-21T00:27:46,599 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:46,599 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/B is initiating minor compaction (all files) 2024-11-21T00:27:46,599 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/B in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
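The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines refer to HBase's size-ratio rule: a candidate set is acceptable only if no file is larger than the combined size of the other files scaled by the compaction ratio (hbase.hstore.compaction.ratio, commonly 1.2 in stock configurations). The stand-alone sketch below illustrates that rule only; it is not the ExploringCompactionPolicy source, and the byte sizes are rough conversions of the 30.3 K / 30.2 K / 38.6 K files selected for store A above (logged total 101562 bytes).

import java.util.List;

public class CompactionRatioCheck {
    /** ratio plays the role of hbase.hstore.compaction.ratio. */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            // No single candidate may dwarf the rest of the selection.
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate byte sizes of the three store A files chosen above (assumed split).
        List<Long> storeA = List.of(31_000L, 30_950L, 39_600L);
        System.out.println(filesInRatio(storeA, 1.2));  // true, so the trio is compacted together
    }
}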
2024-11-21T00:27:46,600 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/663bf093484c43fc84b0b499a7f083ad, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/283a4789e80a411ba38a055192cc637c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/031d306a1ac441d182387f61a7da11b8] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=35.3 K 2024-11-21T00:27:46,600 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting a98a8a9a74044679b68709371f8dbc04, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732148864334 2024-11-21T00:27:46,602 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 663bf093484c43fc84b0b499a7f083ad, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732148862429 2024-11-21T00:27:46,603 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 283a4789e80a411ba38a055192cc637c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732148863140 2024-11-21T00:27:46,605 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 031d306a1ac441d182387f61a7da11b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732148864339 2024-11-21T00:27:46,628 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:46,637 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#B#compaction#124 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:46,638 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/62d8e210c62f4ab7b8331fc6510fad29 is 50, key is test_row_0/B:col10/1732148864341/Put/seqid=0 2024-11-21T00:27:46,651 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112162376649665b4a82b9e971b0da0d380d_542dd37a0a64b62316f21779fd913b59 store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:46,663 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112162376649665b4a82b9e971b0da0d380d_542dd37a0a64b62316f21779fd913b59, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:46,663 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112162376649665b4a82b9e971b0da0d380d_542dd37a0a64b62316f21779fd913b59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:46,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741971_1147 (size=12207) 2024-11-21T00:27:46,703 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/62d8e210c62f4ab7b8331fc6510fad29 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/62d8e210c62f4ab7b8331fc6510fad29 2024-11-21T00:27:46,720 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/B of 542dd37a0a64b62316f21779fd913b59 into 62d8e210c62f4ab7b8331fc6510fad29(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
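The throttle.PressureAwareThroughputController lines report how long a compaction had to sleep to stay under the shared 50.00 MB/second budget ("slept 0 time(s)" here, since these stores are only a few tens of kilobytes). The class below is a deliberately simplified stand-in written only to make that accounting concrete; it is not HBase's controller, and the chunk sizes in main are arbitrary.

public class SimpleThroughputLimiter {
    private final double maxBytesPerSecond;
    private final long start = System.nanoTime();
    private long bytesSinceStart = 0;
    private int sleeps = 0;

    SimpleThroughputLimiter(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Called by a compaction/flush loop after writing a chunk of bytes. */
    synchronized void control(long bytesWritten) throws InterruptedException {
        bytesSinceStart += bytesWritten;
        double elapsedSec = (System.nanoTime() - start) / 1e9;
        double earliestAllowedSec = bytesSinceStart / maxBytesPerSecond;
        if (earliestAllowedSec > elapsedSec) {
            sleeps++;  // what the log reports as "slept N time(s)"
            Thread.sleep((long) ((earliestAllowedSec - elapsedSec) * 1000));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50 * 1024 * 1024); // 50 MB/s limit, as in the log
        for (int i = 0; i < 100; i++) {
            limiter.control(1024 * 1024);  // pretend we just wrote 1 MB
        }
        System.out.println("slept " + limiter.sleeps + " time(s)");
    }
}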
2024-11-21T00:27:46,720 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:46,720 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/B, priority=13, startTime=1732148866595; duration=0sec 2024-11-21T00:27:46,720 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:46,720 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:B 2024-11-21T00:27:46,720 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:46,723 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:46,723 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/C is initiating minor compaction (all files) 2024-11-21T00:27:46,723 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/C in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,723 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/c33db9fcc1654be2a6cc92cd226b2e8f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/1d7922d5c1144555b618fbc757e0235b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/426b7bd15e2743c0a49d476680be8417] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=35.3 K 2024-11-21T00:27:46,724 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c33db9fcc1654be2a6cc92cd226b2e8f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732148862429 2024-11-21T00:27:46,724 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d7922d5c1144555b618fbc757e0235b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732148863140 2024-11-21T00:27:46,725 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 426b7bd15e2743c0a49d476680be8417, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732148864339 2024-11-21T00:27:46,725 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-21T00:27:46,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:46,726 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:27:46,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:46,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:46,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:46,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:46,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:46,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:46,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:46,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:46,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741972_1148 (size=4469) 2024-11-21T00:27:46,748 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#A#compaction#123 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:46,749 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/6eb5ec83f3da4e7cb08bab3593439c34 is 175, key is test_row_0/A:col10/1732148864341/Put/seqid=0 2024-11-21T00:27:46,754 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#C#compaction#125 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:46,755 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/ed5d78e249b74fbeb6715ee41f8533e5 is 50, key is test_row_0/C:col10/1732148864341/Put/seqid=0 2024-11-21T00:27:46,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411218bcab1e72c654e46a3ea808107da56cb_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148865589/Put/seqid=0 2024-11-21T00:27:46,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148926753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148926753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148926753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148926760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148926760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741973_1149 (size=31161) 2024-11-21T00:27:46,781 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/6eb5ec83f3da4e7cb08bab3593439c34 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6eb5ec83f3da4e7cb08bab3593439c34 2024-11-21T00:27:46,789 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/A of 542dd37a0a64b62316f21779fd913b59 into 6eb5ec83f3da4e7cb08bab3593439c34(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
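The RegionTooBusyException warnings throughout this stretch are expected under TestAcidGuarantees: the region's memstore is capped at 512 K, so incoming mutations are pushed back until the in-flight flush drains it. The standard HBase client retries these internally; the sketch below only makes that backoff explicit for anyone driving Table.put by hand. Table, row, family, and qualifier names are copied from the log, the cell value is a placeholder, and the retry limits are assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value")); // placeholder value
            long backoffMs = 100;  // assumption
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore limit (512 K in this test) until the
                    // pending flush completes; wait and retry rather than failing the write.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}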
2024-11-21T00:27:46,789 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:46,789 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/A, priority=13, startTime=1732148866592; duration=0sec 2024-11-21T00:27:46,789 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:46,789 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:A 2024-11-21T00:27:46,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741974_1150 (size=12207) 2024-11-21T00:27:46,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741975_1151 (size=12154) 2024-11-21T00:27:46,822 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/ed5d78e249b74fbeb6715ee41f8533e5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ed5d78e249b74fbeb6715ee41f8533e5 2024-11-21T00:27:46,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:46,833 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/C of 542dd37a0a64b62316f21779fd913b59 into ed5d78e249b74fbeb6715ee41f8533e5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
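Family A's flushes and compactions run through DefaultMobStoreFlusher / DefaultMobStoreCompactor and its flushed files are renamed under mobdir/ (see the HMobStore entries just below), so A is evidently a MOB-enabled column family while B and C are not. The descriptor the test actually builds is not visible in this log; the snippet below is only an illustrative reconstruction using the public ColumnFamilyDescriptorBuilder API, with a made-up 100-byte MOB threshold chosen because the A cells logged above are 175 bytes while the B and C cells are 50.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                    .setMobEnabled(true)     // cells above the threshold are written as MOB files under mobdir/
                    .setMobThreshold(100L)   // assumed threshold, not read from the test source
                    .build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
                .build());
        }
    }
}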
2024-11-21T00:27:46,833 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:46,834 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/C, priority=13, startTime=1732148866595; duration=0sec 2024-11-21T00:27:46,834 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:46,834 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:C 2024-11-21T00:27:46,834 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411218bcab1e72c654e46a3ea808107da56cb_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411218bcab1e72c654e46a3ea808107da56cb_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:46,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/2719ec3106c849d9a8584401e20d6c46, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:46,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/2719ec3106c849d9a8584401e20d6c46 is 175, key is test_row_0/A:col10/1732148865589/Put/seqid=0 2024-11-21T00:27:46,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148926863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741976_1152 (size=30955) 2024-11-21T00:27:46,871 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/2719ec3106c849d9a8584401e20d6c46 2024-11-21T00:27:46,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148926872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148926873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148926873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:46,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/e1ab9e9e2d9949aaab8373980ed15f55 is 50, key is test_row_0/B:col10/1732148865589/Put/seqid=0 2024-11-21T00:27:46,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741977_1153 (size=12001) 2024-11-21T00:27:46,936 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/e1ab9e9e2d9949aaab8373980ed15f55 2024-11-21T00:27:46,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/b0ebedb5b2f041c488985ed093313919 is 50, key is test_row_0/C:col10/1732148865589/Put/seqid=0 2024-11-21T00:27:47,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741978_1154 (size=12001) 2024-11-21T00:27:47,010 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/b0ebedb5b2f041c488985ed093313919 2024-11-21T00:27:47,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/2719ec3106c849d9a8584401e20d6c46 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2719ec3106c849d9a8584401e20d6c46 2024-11-21T00:27:47,038 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2719ec3106c849d9a8584401e20d6c46, entries=150, sequenceid=120, filesize=30.2 K 2024-11-21T00:27:47,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/e1ab9e9e2d9949aaab8373980ed15f55 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e1ab9e9e2d9949aaab8373980ed15f55 2024-11-21T00:27:47,046 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e1ab9e9e2d9949aaab8373980ed15f55, entries=150, sequenceid=120, filesize=11.7 K 2024-11-21T00:27:47,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/b0ebedb5b2f041c488985ed093313919 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/b0ebedb5b2f041c488985ed093313919 2024-11-21T00:27:47,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-21T00:27:47,061 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/b0ebedb5b2f041c488985ed093313919, entries=150, sequenceid=120, filesize=11.7 K 2024-11-21T00:27:47,062 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 542dd37a0a64b62316f21779fd913b59 in 336ms, sequenceid=120, compaction requested=false 2024-11-21T00:27:47,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:47,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] 
regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:47,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-21T00:27:47,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-21T00:27:47,067 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-21T00:27:47,067 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1140 sec 2024-11-21T00:27:47,070 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.1220 sec 2024-11-21T00:27:47,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:27:47,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:47,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:47,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:47,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:47,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:47,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:47,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:47,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121489189f1beeb4d38859a280c4246217f_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148866746/Put/seqid=0 2024-11-21T00:27:47,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741979_1155 (size=14744) 2024-11-21T00:27:47,171 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:47,176 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121489189f1beeb4d38859a280c4246217f_542dd37a0a64b62316f21779fd913b59 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121489189f1beeb4d38859a280c4246217f_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:47,178 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/6b1f096377c44e5398134c6f5265900e, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:47,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/6b1f096377c44e5398134c6f5265900e is 175, key is test_row_0/A:col10/1732148866746/Put/seqid=0 2024-11-21T00:27:47,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148927167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148927167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148927167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148927178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741980_1156 (size=39699) 2024-11-21T00:27:47,215 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/6b1f096377c44e5398134c6f5265900e 2024-11-21T00:27:47,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/5b1aa2832ec244e5ba4b85b558a9a85a is 50, key is test_row_0/B:col10/1732148866746/Put/seqid=0 2024-11-21T00:27:47,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741981_1157 (size=12101) 2024-11-21T00:27:47,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148927280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148927289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148927311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148927312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148927488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148927511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148927515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148927522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/5b1aa2832ec244e5ba4b85b558a9a85a 2024-11-21T00:27:47,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/ce5c79421c334e6c86e1038ebbf69962 is 50, key is test_row_0/C:col10/1732148866746/Put/seqid=0 2024-11-21T00:27:47,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741982_1158 (size=12101) 2024-11-21T00:27:47,753 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/ce5c79421c334e6c86e1038ebbf69962 2024-11-21T00:27:47,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/6b1f096377c44e5398134c6f5265900e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6b1f096377c44e5398134c6f5265900e 2024-11-21T00:27:47,768 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6b1f096377c44e5398134c6f5265900e, entries=200, sequenceid=134, filesize=38.8 K 2024-11-21T00:27:47,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/5b1aa2832ec244e5ba4b85b558a9a85a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5b1aa2832ec244e5ba4b85b558a9a85a 2024-11-21T00:27:47,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148927770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5b1aa2832ec244e5ba4b85b558a9a85a, entries=150, sequenceid=134, filesize=11.8 K 2024-11-21T00:27:47,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/ce5c79421c334e6c86e1038ebbf69962 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ce5c79421c334e6c86e1038ebbf69962 2024-11-21T00:27:47,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ce5c79421c334e6c86e1038ebbf69962, entries=150, sequenceid=134, filesize=11.8 K 2024-11-21T00:27:47,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, 
currentSize=140.89 KB/144270 for 542dd37a0a64b62316f21779fd913b59 in 706ms, sequenceid=134, compaction requested=true 2024-11-21T00:27:47,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:47,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:47,786 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:47,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:47,786 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:47,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:47,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:47,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:47,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:47,788 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:47,788 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/A is initiating minor compaction (all files) 2024-11-21T00:27:47,788 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/A in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
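While this flush was running, the RPC handlers above repeatedly rejected Mutate calls with RegionTooBusyException because the region's memstore had grown past its blocking limit (logged as "Over memstore limit=512.0 K"). That blocking limit is the product of the per-region memstore flush size and the block multiplier; once the flush completes, the handlers accept writes again. The Java sketch below only illustrates the two configuration properties involved; the values are assumptions chosen to reproduce the 512 K figure and are not taken from this test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Per-region memstore size at which a flush is requested.
        // Hypothetical small value picked only so that the blocking limit
        // works out to the 512 K seen in the log (128 K * 4); the real
        // test configuration is not shown in this excerpt.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);

        // Multiplier above the flush size at which new writes are rejected
        // with RegionTooBusyException until flushing catches up.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        // Prints 524288 bytes, i.e. 512 K, matching the limit in the log.
        System.out.println("blocking limit bytes = " + blockingLimit);
    }
}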
2024-11-21T00:27:47,788 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6eb5ec83f3da4e7cb08bab3593439c34, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2719ec3106c849d9a8584401e20d6c46, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6b1f096377c44e5398134c6f5265900e] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=99.4 K 2024-11-21T00:27:47,788 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:47,788 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6eb5ec83f3da4e7cb08bab3593439c34, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2719ec3106c849d9a8584401e20d6c46, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6b1f096377c44e5398134c6f5265900e] 2024-11-21T00:27:47,789 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:47,789 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/B is initiating minor compaction (all files) 2024-11-21T00:27:47,789 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/B in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
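The compaction entries above show the flush leaving three store files in each of the A and B stores, at which point ExploringCompactionPolicy selects all three eligible files for a minor compaction. As a rough illustration only (not this run's configuration), the sketch below shows the properties that control when such a selection is triggered; the values are common defaults and are assumptions as far as this test is concerned.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Minimum number of eligible store files before a minor compaction
        // is scheduled; the log shows the policy acting once 3 files exist.
        conf.setInt("hbase.hstore.compactionThreshold", 3);

        // File-size ratio used by ExploringCompactionPolicy when deciding
        // whether a file joins the selection (illustrative value).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

        // Upper bound on the number of files in a single compaction request.
        conf.setInt("hbase.hstore.compaction.max", 10);

        System.out.println("minor compaction considered at "
                + conf.getInt("hbase.hstore.compactionThreshold", -1)
                + " store files");
    }
}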
2024-11-21T00:27:47,789 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/62d8e210c62f4ab7b8331fc6510fad29, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e1ab9e9e2d9949aaab8373980ed15f55, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5b1aa2832ec244e5ba4b85b558a9a85a] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=35.5 K 2024-11-21T00:27:47,789 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6eb5ec83f3da4e7cb08bab3593439c34, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732148864339 2024-11-21T00:27:47,790 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 62d8e210c62f4ab7b8331fc6510fad29, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732148864339 2024-11-21T00:27:47,790 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2719ec3106c849d9a8584401e20d6c46, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732148865585 2024-11-21T00:27:47,790 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e1ab9e9e2d9949aaab8373980ed15f55, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732148865585 2024-11-21T00:27:47,790 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b1f096377c44e5398134c6f5265900e, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732148866746 2024-11-21T00:27:47,792 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b1aa2832ec244e5ba4b85b558a9a85a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732148866746 2024-11-21T00:27:47,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:27:47,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:47,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:47,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:47,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:47,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:47,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:47,812 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:47,817 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#B#compaction#132 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:47,817 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/e422160cb4dd46158d3ab4b454ff318b is 50, key is test_row_0/B:col10/1732148866746/Put/seqid=0 2024-11-21T00:27:47,825 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:47,830 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411211d622313b7254143a5c2ac6f301b7a80_542dd37a0a64b62316f21779fd913b59 store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:47,833 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411211d622313b7254143a5c2ac6f301b7a80_542dd37a0a64b62316f21779fd913b59, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:47,833 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411211d622313b7254143a5c2ac6f301b7a80_542dd37a0a64b62316f21779fd913b59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:47,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741983_1159 (size=12409) 2024-11-21T00:27:47,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148927848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148927848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,859 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/e422160cb4dd46158d3ab4b454ff318b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e422160cb4dd46158d3ab4b454ff318b 2024-11-21T00:27:47,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148927853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148927854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,867 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/B of 542dd37a0a64b62316f21779fd913b59 into e422160cb4dd46158d3ab4b454ff318b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:47,867 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:47,867 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/B, priority=13, startTime=1732148867786; duration=0sec 2024-11-21T00:27:47,868 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:47,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121f34e8edd2a4a4ba8980819435b4395ff_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148867156/Put/seqid=0 2024-11-21T00:27:47,868 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:B 2024-11-21T00:27:47,868 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:47,870 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:47,870 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 
542dd37a0a64b62316f21779fd913b59/C is initiating minor compaction (all files) 2024-11-21T00:27:47,870 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/C in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:47,870 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ed5d78e249b74fbeb6715ee41f8533e5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/b0ebedb5b2f041c488985ed093313919, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ce5c79421c334e6c86e1038ebbf69962] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=35.5 K 2024-11-21T00:27:47,871 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ed5d78e249b74fbeb6715ee41f8533e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732148864339 2024-11-21T00:27:47,872 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b0ebedb5b2f041c488985ed093313919, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732148865585 2024-11-21T00:27:47,874 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ce5c79421c334e6c86e1038ebbf69962, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732148866746 2024-11-21T00:27:47,886 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#C#compaction#135 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:47,887 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/4212bd5e4c9d4b8b98a39c17dd5b43de is 50, key is test_row_0/C:col10/1732148866746/Put/seqid=0 2024-11-21T00:27:47,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741984_1160 (size=4469) 2024-11-21T00:27:47,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741985_1161 (size=12304) 2024-11-21T00:27:47,912 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:47,917 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121f34e8edd2a4a4ba8980819435b4395ff_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f34e8edd2a4a4ba8980819435b4395ff_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:47,919 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/ced0aa55a3704e5991e2d78a04fcdbe8, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:47,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/ced0aa55a3704e5991e2d78a04fcdbe8 is 175, key is test_row_0/A:col10/1732148867156/Put/seqid=0 2024-11-21T00:27:47,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741986_1162 (size=12409) 2024-11-21T00:27:47,959 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/4212bd5e4c9d4b8b98a39c17dd5b43de as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4212bd5e4c9d4b8b98a39c17dd5b43de 2024-11-21T00:27:47,969 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/C of 542dd37a0a64b62316f21779fd913b59 into 4212bd5e4c9d4b8b98a39c17dd5b43de(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:47,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148927958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148927958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148927966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:47,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148927967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:47,979 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:47,979 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/C, priority=13, startTime=1732148867787; duration=0sec 2024-11-21T00:27:47,979 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:47,979 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:C 2024-11-21T00:27:47,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741987_1163 (size=31105) 2024-11-21T00:27:47,990 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/ced0aa55a3704e5991e2d78a04fcdbe8 2024-11-21T00:27:48,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/5080bbf11838431d824301a4f9d1bd7c is 50, key is test_row_0/B:col10/1732148867156/Put/seqid=0 2024-11-21T00:27:48,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-21T00:27:48,059 INFO [Thread-621 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-21T00:27:48,062 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:48,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-21T00:27:48,065 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:48,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-21T00:27:48,066 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:48,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:48,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741988_1164 (size=12151) 2024-11-21T00:27:48,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-21T00:27:48,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:48,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148928173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:48,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148928176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:48,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148928177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:48,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148928179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,219 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-21T00:27:48,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:48,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:48,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:48,223 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:48,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:48,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:48,296 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#A#compaction#133 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:48,296 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/1247353d74514c26a73abb9b5a9feb7a is 175, key is test_row_0/A:col10/1732148866746/Put/seqid=0 2024-11-21T00:27:48,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741989_1165 (size=31363) 2024-11-21T00:27:48,367 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/1247353d74514c26a73abb9b5a9feb7a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1247353d74514c26a73abb9b5a9feb7a 2024-11-21T00:27:48,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-21T00:27:48,380 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/A of 542dd37a0a64b62316f21779fd913b59 into 1247353d74514c26a73abb9b5a9feb7a(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:48,380 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:48,380 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/A, priority=13, startTime=1732148867786; duration=0sec 2024-11-21T00:27:48,380 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:48,380 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:A 2024-11-21T00:27:48,390 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-21T00:27:48,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:48,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:48,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:48,391 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:48,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:48,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:48,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/5080bbf11838431d824301a4f9d1bd7c 2024-11-21T00:27:48,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:48,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148928494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:48,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148928494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:48,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148928495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/a77764b0073f454cbc9ab26e64172d41 is 50, key is test_row_0/C:col10/1732148867156/Put/seqid=0 2024-11-21T00:27:48,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:48,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148928498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741990_1166 (size=12151) 2024-11-21T00:27:48,515 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/a77764b0073f454cbc9ab26e64172d41 2024-11-21T00:27:48,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/ced0aa55a3704e5991e2d78a04fcdbe8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ced0aa55a3704e5991e2d78a04fcdbe8 2024-11-21T00:27:48,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ced0aa55a3704e5991e2d78a04fcdbe8, entries=150, sequenceid=159, filesize=30.4 K 2024-11-21T00:27:48,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/5080bbf11838431d824301a4f9d1bd7c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5080bbf11838431d824301a4f9d1bd7c 2024-11-21T00:27:48,537 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5080bbf11838431d824301a4f9d1bd7c, entries=150, sequenceid=159, filesize=11.9 K 2024-11-21T00:27:48,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/a77764b0073f454cbc9ab26e64172d41 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/a77764b0073f454cbc9ab26e64172d41 2024-11-21T00:27:48,548 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,548 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-21T00:27:48,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:48,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:48,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:48,549 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:48,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:48,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:48,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/a77764b0073f454cbc9ab26e64172d41, entries=150, sequenceid=159, filesize=11.9 K 2024-11-21T00:27:48,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 542dd37a0a64b62316f21779fd913b59 in 746ms, sequenceid=159, compaction requested=false 2024-11-21T00:27:48,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:48,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-21T00:27:48,707 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:48,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-21T00:27:48,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:48,711 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:27:48,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:48,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:48,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:48,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:48,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:48,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:48,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411217bf3c6e6808d44e0b601d01a3d783e09_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148867840/Put/seqid=0 2024-11-21T00:27:48,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741991_1167 (size=12304) 2024-11-21T00:27:48,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:48,775 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411217bf3c6e6808d44e0b601d01a3d783e09_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411217bf3c6e6808d44e0b601d01a3d783e09_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:48,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/9d57323fe7f2454886aaf5be197e2162, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:48,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/9d57323fe7f2454886aaf5be197e2162 is 175, key is test_row_0/A:col10/1732148867840/Put/seqid=0 2024-11-21T00:27:48,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741992_1168 (size=31105) 2024-11-21T00:27:48,971 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-21T00:27:48,971 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-21T00:27:49,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:49,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:49,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148929077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148929083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148929087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148929089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-21T00:27:49,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148929199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148929199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148929203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148929209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,230 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/9d57323fe7f2454886aaf5be197e2162 2024-11-21T00:27:49,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/a4a0a919615a42dab5beea1ea965655d is 50, key is test_row_0/B:col10/1732148867840/Put/seqid=0 2024-11-21T00:27:49,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741993_1169 (size=12151) 2024-11-21T00:27:49,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148929408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148929418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148929419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148929423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148929723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,735 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/a4a0a919615a42dab5beea1ea965655d 2024-11-21T00:27:49,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148929740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148929746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148929759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/01afbfcbe8af47de964dfa65fecb72ba is 50, key is test_row_0/C:col10/1732148867840/Put/seqid=0 2024-11-21T00:27:49,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:49,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148929779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:49,787 DEBUG [Thread-611 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4193 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., hostname=0e7930017ff8,37961,1732148819586, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:49,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741994_1170 (size=12151) 2024-11-21T00:27:49,789 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/01afbfcbe8af47de964dfa65fecb72ba 2024-11-21T00:27:49,799 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/9d57323fe7f2454886aaf5be197e2162 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9d57323fe7f2454886aaf5be197e2162 2024-11-21T00:27:49,805 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9d57323fe7f2454886aaf5be197e2162, entries=150, sequenceid=173, filesize=30.4 K 2024-11-21T00:27:49,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/a4a0a919615a42dab5beea1ea965655d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4a0a919615a42dab5beea1ea965655d 2024-11-21T00:27:49,813 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4a0a919615a42dab5beea1ea965655d, entries=150, sequenceid=173, filesize=11.9 K 2024-11-21T00:27:49,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/01afbfcbe8af47de964dfa65fecb72ba as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/01afbfcbe8af47de964dfa65fecb72ba 2024-11-21T00:27:49,820 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/01afbfcbe8af47de964dfa65fecb72ba, entries=150, sequenceid=173, filesize=11.9 K 2024-11-21T00:27:49,822 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 542dd37a0a64b62316f21779fd913b59 in 1112ms, sequenceid=173, compaction requested=true 2024-11-21T00:27:49,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:49,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:49,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-21T00:27:49,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-21T00:27:49,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-21T00:27:49,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7570 sec 2024-11-21T00:27:49,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.7680 sec 2024-11-21T00:27:50,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-21T00:27:50,176 INFO [Thread-621 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-21T00:27:50,177 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:50,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-21T00:27:50,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-21T00:27:50,180 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:50,181 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:50,181 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:50,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-21T00:27:50,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:50,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:50,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:50,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:50,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:50,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:50,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:50,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-21T00:27:50,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112122be52ca0093413a9f68549e7a7e018f_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148869072/Put/seqid=0 2024-11-21T00:27:50,337 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:50,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:50,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:50,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:50,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
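The pid=54 failure above comes from the master's FlushTableProcedure dispatching a FlushRegionCallable while the region is still busy with the previous flush, so the callable reports "Unable to complete flush" and the master will retry the subprocedure. For context only (this is not part of the log), a minimal client-side sketch of how such a table flush is typically requested through the Admin API; the connection setup and hard-coded table name are assumptions for illustration, not values taken from the test harness:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master; the master fans out a
      // FlushRegionProcedure per region and waits for the region servers to report back,
      // matching the pid=53/54 sequence recorded in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}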
2024-11-21T00:27:50,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741995_1171 (size=14794) 2024-11-21T00:27:50,383 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:50,395 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112122be52ca0093413a9f68549e7a7e018f_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112122be52ca0093413a9f68549e7a7e018f_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:50,397 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/22cab42e5cc347c3a16313ab8bed511d, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:50,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/22cab42e5cc347c3a16313ab8bed511d is 175, key is test_row_0/A:col10/1732148869072/Put/seqid=0 2024-11-21T00:27:50,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741996_1172 (size=39749) 2024-11-21T00:27:50,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-21T00:27:50,499 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:50,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:50,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:50,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
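The recurring "Over memstore limit=512.0 K" RegionTooBusyException throughout this log is the per-region blocking check in HRegion.checkResources: writes are rejected once the region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and they resume after MemStoreFlusher drains the region. As a minimal sketch of that relationship (not part of the log), assuming a 128 KB flush size and the default multiplier of 4 so the product matches the 512 K limit seen here; the test's actual settings are not shown in this excerpt:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // assumed 128 KB flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Prints 512.0 K: puts beyond this size fail fast with RegionTooBusyException,
    // which the retrying client (RpcRetryingCallerImpl) observes in the records above.
    System.out.println((blockingLimit / 1024.0) + " K");
  }
}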
2024-11-21T00:27:50,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148930397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148930522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148930545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148930552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148930652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148930652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148930655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,665 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:50,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:50,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:50,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:50,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:50,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148930680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-21T00:27:50,818 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:50,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:50,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:50,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:50,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:50,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,858 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/22cab42e5cc347c3a16313ab8bed511d 2024-11-21T00:27:50,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148930866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148930867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148930869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/a4453ecc5fe849629548e3f9f7b2fd0b is 50, key is test_row_0/B:col10/1732148869072/Put/seqid=0 2024-11-21T00:27:50,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:50,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148930889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741997_1173 (size=12151) 2024-11-21T00:27:50,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/a4453ecc5fe849629548e3f9f7b2fd0b 2024-11-21T00:27:50,971 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:50,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:50,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:50,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:50,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:50,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:50,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/d3b6dbd5d4644bba9dab5aa6c546645d is 50, key is test_row_0/C:col10/1732148869072/Put/seqid=0 2024-11-21T00:27:51,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741998_1174 (size=12151) 2024-11-21T00:27:51,125 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:51,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:51,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:51,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148931174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148931175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148931179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148931197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,278 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:51,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:51,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
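The RegionTooBusyException stack traces above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking threshold (reported here as 512.0 K), while the flush that would drain it keeps failing because the region is already flushing. As a rough illustration only, here is a minimal client-side retry sketch in Java: it assumes the exception actually reaches the caller instead of being absorbed by the HBase client's own retry policy, reuses the table/row/column names visible in the log, and uses illustrative retry and backoff values.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The blocking threshold logged as "Over memstore limit" is normally derived from
    // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier
    // (the 512.0 K seen above implies a deliberately small flush size in this test).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("approx. blocking memstore size = " + (flushSize * multiplier) + " bytes");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put); // the client library also retries internally before surfacing an error
          break;
        } catch (RegionTooBusyException e) {
          // The region is refusing updates until its memstore drains; back off and retry.
          if (attempt >= 5) {
            throw e; // illustrative cap on application-level retries
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}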
2024-11-21T00:27:51,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-21T00:27:51,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:51,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:51,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:51,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/d3b6dbd5d4644bba9dab5aa6c546645d 2024-11-21T00:27:51,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/22cab42e5cc347c3a16313ab8bed511d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/22cab42e5cc347c3a16313ab8bed511d 2024-11-21T00:27:51,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/22cab42e5cc347c3a16313ab8bed511d, entries=200, sequenceid=199, filesize=38.8 K 2024-11-21T00:27:51,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/a4453ecc5fe849629548e3f9f7b2fd0b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4453ecc5fe849629548e3f9f7b2fd0b 2024-11-21T00:27:51,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4453ecc5fe849629548e3f9f7b2fd0b, entries=150, sequenceid=199, filesize=11.9 K 2024-11-21T00:27:51,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/d3b6dbd5d4644bba9dab5aa6c546645d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/d3b6dbd5d4644bba9dab5aa6c546645d 2024-11-21T00:27:51,590 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:51,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
as already flushing 2024-11-21T00:27:51,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,591 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/d3b6dbd5d4644bba9dab5aa6c546645d, entries=150, sequenceid=199, filesize=11.9 K 2024-11-21T00:27:51,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 542dd37a0a64b62316f21779fd913b59 in 1362ms, sequenceid=199, compaction requested=true 2024-11-21T00:27:51,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:51,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:51,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:51,613 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:51,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:51,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:51,613 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:51,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:27:51,613 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:51,616 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133322 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:51,616 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:51,622 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/A is initiating minor compaction (all files) 2024-11-21T00:27:51,622 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/A in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,623 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/B is initiating minor compaction (all files) 2024-11-21T00:27:51,623 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/B in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
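The compaction-selection entries above and below (SortedCompactionPolicy / ExploringCompactionPolicy "selected 4 files ... after considering 3 permutations with 3 in ratio") hinge on a ratio test over the candidate file sizes. The following is a simplified, hypothetical Java re-implementation of that test for illustration: each candidate file must be no larger than the compaction ratio times the combined size of the other candidates. The 1.2 ratio is the usual hbase.hstore.compaction.ratio default, and the sizes are the rounded kilobyte figures from the Compactor(224) lines, so treat the numbers as approximate.

import java.util.List;

public class FilesInRatioSketch {
  // Returns true when every file is <= ratio * (total size of the other files).
  static boolean filesInRatio(List<Double> sizesKb, double ratio) {
    if (sizesKb.size() < 2) {
      return true; // a single file is trivially in ratio
    }
    double total = sizesKb.stream().mapToDouble(Double::doubleValue).sum();
    for (double size : sizesKb) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    double ratio = 1.2; // assumed hbase.hstore.compaction.ratio default
    // Store B candidate from the log: 12.1 K + 11.9 K + 11.9 K + 11.9 K (~47.7 K total).
    List<Double> storeB = List.of(12.1, 11.9, 11.9, 11.9);
    // Store A candidate from the log: 30.6 K + 30.4 K + 30.4 K + 38.8 K (~130.2 K total).
    List<Double> storeA = List.of(30.6, 30.4, 30.4, 38.8);
    System.out.println("B candidate in ratio: " + filesInRatio(storeB, ratio)); // true
    System.out.println("A candidate in ratio: " + filesInRatio(storeA, ratio)); // true
  }
}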
2024-11-21T00:27:51,623 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e422160cb4dd46158d3ab4b454ff318b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5080bbf11838431d824301a4f9d1bd7c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4a0a919615a42dab5beea1ea965655d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4453ecc5fe849629548e3f9f7b2fd0b] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=47.7 K 2024-11-21T00:27:51,673 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1247353d74514c26a73abb9b5a9feb7a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ced0aa55a3704e5991e2d78a04fcdbe8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9d57323fe7f2454886aaf5be197e2162, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/22cab42e5cc347c3a16313ab8bed511d] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=130.2 K 2024-11-21T00:27:51,673 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,673 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1247353d74514c26a73abb9b5a9feb7a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ced0aa55a3704e5991e2d78a04fcdbe8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9d57323fe7f2454886aaf5be197e2162, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/22cab42e5cc347c3a16313ab8bed511d] 2024-11-21T00:27:51,674 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e422160cb4dd46158d3ab4b454ff318b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732148866746 2024-11-21T00:27:51,675 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1247353d74514c26a73abb9b5a9feb7a, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732148866746 2024-11-21T00:27:51,675 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5080bbf11838431d824301a4f9d1bd7c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732148867156 2024-11-21T00:27:51,675 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting ced0aa55a3704e5991e2d78a04fcdbe8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732148867156 2024-11-21T00:27:51,676 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting a4a0a919615a42dab5beea1ea965655d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732148867840 2024-11-21T00:27:51,677 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting a4453ecc5fe849629548e3f9f7b2fd0b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732148869072 2024-11-21T00:27:51,689 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d57323fe7f2454886aaf5be197e2162, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732148867840 2024-11-21T00:27:51,690 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22cab42e5cc347c3a16313ab8bed511d, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732148869066 2024-11-21T00:27:51,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:51,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-21T00:27:51,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:51,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:51,693 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:51,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:51,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:51,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:51,724 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:51,738 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#B#compaction#145 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:51,739 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/1e128ce42c1048e28d3163e2e752d2ce is 50, key is test_row_0/B:col10/1732148869072/Put/seqid=0 2024-11-21T00:27:51,744 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:51,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:51,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,748 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411212f569b17677c4d1eb9431c1e4816d4f3_542dd37a0a64b62316f21779fd913b59 store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:51,751 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411212f569b17677c4d1eb9431c1e4816d4f3_542dd37a0a64b62316f21779fd913b59, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:51,751 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411212f569b17677c4d1eb9431c1e4816d4f3_542dd37a0a64b62316f21779fd913b59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:51,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741999_1175 (size=12595) 2024-11-21T00:27:51,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742000_1176 (size=4469) 2024-11-21T00:27:51,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121f1b8b0f840aa430593bdcaffed2970d5_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148870382/Put/seqid=0 2024-11-21T00:27:51,824 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#A#compaction#144 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:51,825 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/eca0ab5c971b4676a63ce76cc1674998 is 175, key is test_row_0/A:col10/1732148869072/Put/seqid=0 2024-11-21T00:27:51,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148931836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148931837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148931839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148931837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742001_1177 (size=12304) 2024-11-21T00:27:51,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742002_1178 (size=31549) 2024-11-21T00:27:51,898 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:51,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:51,912 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/eca0ab5c971b4676a63ce76cc1674998 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/eca0ab5c971b4676a63ce76cc1674998 2024-11-21T00:27:51,920 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/A of 542dd37a0a64b62316f21779fd913b59 into eca0ab5c971b4676a63ce76cc1674998(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:51,921 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:51,921 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/A, priority=12, startTime=1732148871613; duration=0sec 2024-11-21T00:27:51,921 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:51,921 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:A 2024-11-21T00:27:51,921 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:27:51,924 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:27:51,924 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/C is initiating minor compaction (all files) 2024-11-21T00:27:51,924 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/C in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:51,924 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4212bd5e4c9d4b8b98a39c17dd5b43de, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/a77764b0073f454cbc9ab26e64172d41, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/01afbfcbe8af47de964dfa65fecb72ba, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/d3b6dbd5d4644bba9dab5aa6c546645d] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=47.7 K 2024-11-21T00:27:51,928 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4212bd5e4c9d4b8b98a39c17dd5b43de, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732148866746 2024-11-21T00:27:51,928 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting a77764b0073f454cbc9ab26e64172d41, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732148867156 2024-11-21T00:27:51,929 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01afbfcbe8af47de964dfa65fecb72ba, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732148867840 2024-11-21T00:27:51,929 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3b6dbd5d4644bba9dab5aa6c546645d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732148869072 2024-11-21T00:27:51,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148931949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148931950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148931950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:51,960 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#C#compaction#147 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:51,961 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/da40b3f40ab942b19e76d09c2fb07eb1 is 50, key is test_row_0/C:col10/1732148869072/Put/seqid=0 2024-11-21T00:27:51,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:51,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148931955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742003_1179 (size=12595) 2024-11-21T00:27:52,047 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/da40b3f40ab942b19e76d09c2fb07eb1 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/da40b3f40ab942b19e76d09c2fb07eb1 2024-11-21T00:27:52,053 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/C of 542dd37a0a64b62316f21779fd913b59 into da40b3f40ab942b19e76d09c2fb07eb1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:27:52,053 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:52,053 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/C, priority=12, startTime=1732148871613; duration=0sec 2024-11-21T00:27:52,054 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:52,054 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:C 2024-11-21T00:27:52,062 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:52,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:52,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:52,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:52,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148932156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:52,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148932156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:52,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148932157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:52,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148932165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:52,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:52,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,218 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/1e128ce42c1048e28d3163e2e752d2ce as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1e128ce42c1048e28d3163e2e752d2ce 2024-11-21T00:27:52,237 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/B of 542dd37a0a64b62316f21779fd913b59 into 1e128ce42c1048e28d3163e2e752d2ce(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:52,237 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:52,237 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/B, priority=12, startTime=1732148871613; duration=0sec 2024-11-21T00:27:52,237 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:52,237 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:B 2024-11-21T00:27:52,278 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:52,285 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121f1b8b0f840aa430593bdcaffed2970d5_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f1b8b0f840aa430593bdcaffed2970d5_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:52,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-21T00:27:52,288 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/5bc2b43ec2544be7ac47931692c3b7d5, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:52,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/5bc2b43ec2544be7ac47931692c3b7d5 is 175, key is test_row_0/A:col10/1732148870382/Put/seqid=0 2024-11-21T00:27:52,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742004_1180 (size=31105) 2024-11-21T00:27:52,317 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/5bc2b43ec2544be7ac47931692c3b7d5 2024-11-21T00:27:52,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/f031d1191b954808b94c8f963fa16a00 is 50, key is test_row_0/B:col10/1732148870382/Put/seqid=0 2024-11-21T00:27:52,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742005_1181 (size=12151) 2024-11-21T00:27:52,370 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:52,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:52,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:52,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148932466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:52,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:52,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148932467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148932467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148932478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,525 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:52,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:52,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:52,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,686 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:52,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:52,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:52,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/f031d1191b954808b94c8f963fa16a00 2024-11-21T00:27:52,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/e5c0ee8ab720443385e1b92fb0427c44 is 50, key is test_row_0/C:col10/1732148870382/Put/seqid=0 2024-11-21T00:27:52,842 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:52,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:52,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:52,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:52,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:52,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742006_1182 (size=12151) 2024-11-21T00:27:52,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/e5c0ee8ab720443385e1b92fb0427c44 2024-11-21T00:27:52,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/5bc2b43ec2544be7ac47931692c3b7d5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/5bc2b43ec2544be7ac47931692c3b7d5 2024-11-21T00:27:52,876 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/5bc2b43ec2544be7ac47931692c3b7d5, entries=150, sequenceid=212, filesize=30.4 K 2024-11-21T00:27:52,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/f031d1191b954808b94c8f963fa16a00 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/f031d1191b954808b94c8f963fa16a00 2024-11-21T00:27:52,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/f031d1191b954808b94c8f963fa16a00, entries=150, sequenceid=212, filesize=11.9 K 2024-11-21T00:27:52,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/e5c0ee8ab720443385e1b92fb0427c44 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/e5c0ee8ab720443385e1b92fb0427c44 2024-11-21T00:27:52,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/e5c0ee8ab720443385e1b92fb0427c44, entries=150, sequenceid=212, filesize=11.9 K 2024-11-21T00:27:52,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 542dd37a0a64b62316f21779fd913b59 in 1202ms, sequenceid=212, compaction requested=false 2024-11-21T00:27:52,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:52,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 
2024-11-21T00:27:52,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:27:52,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:52,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:52,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:52,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:52,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:52,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:52,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:52,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148932992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:52,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148932993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:52,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148932997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148932998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112123c4b572b4c5475eb783e28168f80c11_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148872976/Put/seqid=0 2024-11-21T00:27:53,007 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:53,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:53,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:53,008 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742007_1183 (size=12304) 2024-11-21T00:27:53,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148933098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148933099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148933103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148933103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,160 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:53,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:53,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:53,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148933303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148933304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148933306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148933306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,314 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:53,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:53,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,454 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:53,459 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112123c4b572b4c5475eb783e28168f80c11_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112123c4b572b4c5475eb783e28168f80c11_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:53,461 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/3654182756a54384ae60b643e3f41c5f, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:53,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/3654182756a54384ae60b643e3f41c5f is 175, key is test_row_0/A:col10/1732148872976/Put/seqid=0 2024-11-21T00:27:53,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742008_1184 (size=31105) 2024-11-21T00:27:53,469 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:53,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:53,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:53,469 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148933607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148933608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148933609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148933611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,622 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:53,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:53,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:53,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,777 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:53,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:53,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:53,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47732 deadline: 1732148933791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,795 DEBUG [Thread-611 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8201 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., hostname=0e7930017ff8,37961,1732148819586, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:53,871 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=240, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/3654182756a54384ae60b643e3f41c5f 2024-11-21T00:27:53,885 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/bdf551c86a014c0b81573b26be90a2bc is 50, key is test_row_0/B:col10/1732148872976/Put/seqid=0 2024-11-21T00:27:53,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742009_1185 (size=12151) 2024-11-21T00:27:53,930 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:53,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:53,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:53,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:53,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:53,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:54,094 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:54,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:54,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:54,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:54,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:54,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148934112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:54,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:54,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148934113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:54,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:54,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148934113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:54,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:54,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148934114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:54,249 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:54,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:54,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:54,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
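The repeated RegionTooBusyException entries above are the region blocking writes because its memstore has grown past the 512.0 K blocking limit while a flush is still in flight. In a default setup that limit is the memstore flush size multiplied by the block multiplier; the sketch below uses hypothetical settings (not taken from the test's actual configuration) that would produce the 512 KB figure seen in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    // Hypothetical values for illustration only: a 128 KB flush size with a block
    // multiplier of 4 gives a blocking limit of 4 * 128 KB = 512 KB, matching the
    // "Over memstore limit=512.0 K" messages in this log.
    public static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}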
2024-11-21T00:27:54,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-21T00:27:54,324 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/bdf551c86a014c0b81573b26be90a2bc 2024-11-21T00:27:54,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/5842291152af4977bb66668ff2c0b37f is 50, key is test_row_0/C:col10/1732148872976/Put/seqid=0 2024-11-21T00:27:54,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742010_1186 (size=12151) 2024-11-21T00:27:54,403 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:54,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:54,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:54,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,560 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:54,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:54,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:54,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,718 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:54,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:54,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:54,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:54,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:54,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
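The pid=54 failures above and below are the master's flush procedure being redispatched to the region server, which keeps answering that the region is already flushing; each attempt ends in the same "Unable to complete flush" IOException until the in-flight flush finishes. A table-level flush can be requested from the client with the standard Admin API, as in this minimal sketch (the table name is taken from the log, everything else is assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; on the server side this
            // becomes a flush procedure like pid=54 above, which is retried while the
            // region reports that a flush is already in progress.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}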
2024-11-21T00:27:54,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
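From the client's point of view, the RegionTooBusyException bursts in this log (they continue further down) are transient: once the flush drains the memstore below the blocking limit, writes are accepted again. A minimal put-with-backoff sketch follows; it is illustrative only, since the stock HBase client already retries this exception internally and may surface it wrapped in a retries-exhausted exception.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row, family and qualifier mirror the keys visible in the log; the value is made up.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);   // rejected while the memstore is over its blocking limit
                    return;           // accepted once the flush has made room
                } catch (IOException busy) {
                    // RegionTooBusyException may arrive directly or wrapped, depending on
                    // client retry settings; back off and try again either way.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}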
2024-11-21T00:27:54,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/5842291152af4977bb66668ff2c0b37f 2024-11-21T00:27:54,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/3654182756a54384ae60b643e3f41c5f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/3654182756a54384ae60b643e3f41c5f 2024-11-21T00:27:54,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/3654182756a54384ae60b643e3f41c5f, entries=150, sequenceid=240, filesize=30.4 K 2024-11-21T00:27:54,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/bdf551c86a014c0b81573b26be90a2bc as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/bdf551c86a014c0b81573b26be90a2bc 2024-11-21T00:27:54,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/bdf551c86a014c0b81573b26be90a2bc, entries=150, sequenceid=240, filesize=11.9 K 2024-11-21T00:27:54,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/5842291152af4977bb66668ff2c0b37f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5842291152af4977bb66668ff2c0b37f 2024-11-21T00:27:54,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5842291152af4977bb66668ff2c0b37f, entries=150, sequenceid=240, filesize=11.9 K 2024-11-21T00:27:54,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 542dd37a0a64b62316f21779fd913b59 in 1829ms, sequenceid=240, compaction requested=true 2024-11-21T00:27:54,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:54,808 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:54,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-21T00:27:54,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:54,809 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:54,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:54,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:54,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:54,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:54,811 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93759 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:54,811 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/A is initiating minor compaction (all files) 2024-11-21T00:27:54,811 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/A in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,811 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/eca0ab5c971b4676a63ce76cc1674998, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/5bc2b43ec2544be7ac47931692c3b7d5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/3654182756a54384ae60b643e3f41c5f] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=91.6 K 2024-11-21T00:27:54,811 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,811 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/eca0ab5c971b4676a63ce76cc1674998, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/5bc2b43ec2544be7ac47931692c3b7d5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/3654182756a54384ae60b643e3f41c5f] 2024-11-21T00:27:54,812 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:54,812 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting eca0ab5c971b4676a63ce76cc1674998, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732148869072 2024-11-21T00:27:54,812 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/B is initiating minor compaction (all files) 2024-11-21T00:27:54,812 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/B in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:54,812 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1e128ce42c1048e28d3163e2e752d2ce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/f031d1191b954808b94c8f963fa16a00, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/bdf551c86a014c0b81573b26be90a2bc] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=36.0 K 2024-11-21T00:27:54,815 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e128ce42c1048e28d3163e2e752d2ce, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732148869072 2024-11-21T00:27:54,815 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5bc2b43ec2544be7ac47931692c3b7d5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732148870382 2024-11-21T00:27:54,815 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3654182756a54384ae60b643e3f41c5f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732148871822 2024-11-21T00:27:54,815 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f031d1191b954808b94c8f963fa16a00, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732148870382 2024-11-21T00:27:54,816 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 
bdf551c86a014c0b81573b26be90a2bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732148871822 2024-11-21T00:27:54,831 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:54,833 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#B#compaction#153 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:54,833 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/e9a309befa3e4ee69e083f235f84d5df is 50, key is test_row_0/B:col10/1732148872976/Put/seqid=0 2024-11-21T00:27:54,834 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121ee4931b5b4a4438e82eb0d217440c7dc_542dd37a0a64b62316f21779fd913b59 store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:54,836 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121ee4931b5b4a4438e82eb0d217440c7dc_542dd37a0a64b62316f21779fd913b59, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:54,836 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ee4931b5b4a4438e82eb0d217440c7dc_542dd37a0a64b62316f21779fd913b59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:54,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742011_1187 (size=12697) 2024-11-21T00:27:54,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742012_1188 (size=4469) 2024-11-21T00:27:54,876 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:54,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-21T00:27:54,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
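The short-compactions thread above runs DefaultMobStoreCompactor for family A and then aborts its MOB writer because no cell is large enough to be stored as a MOB, which suggests family A of the test table is MOB-enabled. A hedged sketch of declaring such a family (the threshold value is illustrative, not the test's setting):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
    public static TableDescriptor mobTable() {
        // Family A is MOB-enabled, so its compactions go through the MOB-aware compactor
        // even when no value crosses the threshold; in that case the MOB writer is simply
        // aborted, exactly as the log above records.
        ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100 * 1024L)   // illustrative: cells of 100 KB or more become MOBs
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(a)
            .build();
    }
}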
2024-11-21T00:27:54,876 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:27:54,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:54,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:54,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:54,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:54,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:54,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:54,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121b664345daab04d82b7a2d5fb8995ba6b_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148872987/Put/seqid=0 2024-11-21T00:27:54,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742013_1189 (size=12304) 2024-11-21T00:27:55,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:55,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:55,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148935144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148935144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148935146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148935149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,249 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#A#compaction#154 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:55,250 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/7878e40473d74b38af7556e2c4436d93 is 175, key is test_row_0/A:col10/1732148872976/Put/seqid=0 2024-11-21T00:27:55,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148935250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148935250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148935253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148935256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,267 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/e9a309befa3e4ee69e083f235f84d5df as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e9a309befa3e4ee69e083f235f84d5df 2024-11-21T00:27:55,285 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/B of 542dd37a0a64b62316f21779fd913b59 into e9a309befa3e4ee69e083f235f84d5df(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
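
The record above shows the three files of store B being rewritten into the single 12.4 K file e9a309befa3e4ee69e083f235f84d5df; the records that follow select the three files of store C for the same treatment. As an aside, compactions like these can also be requested and observed from outside the region server through the HBase Admin API. The sketch below is illustrative only, assumes a reachable cluster carrying the TestAcidGuarantees table from this log, and is not part of the test run itself.

    // Illustrative sketch: request a compaction of TestAcidGuarantees and poll its state.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class CompactionProbe {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                admin.compact(table); // queue a minor compaction, like the ones logged above
                while (admin.getCompactionState(table) != CompactionState.NONE) {
                    Thread.sleep(1000); // wait for the region server to report completion
                }
            }
        }
    }
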
2024-11-21T00:27:55,285 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:55,285 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/B, priority=13, startTime=1732148874808; duration=0sec 2024-11-21T00:27:55,285 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:55,285 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:B 2024-11-21T00:27:55,285 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:55,286 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:55,286 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/C is initiating minor compaction (all files) 2024-11-21T00:27:55,286 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/C in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:55,287 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/da40b3f40ab942b19e76d09c2fb07eb1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/e5c0ee8ab720443385e1b92fb0427c44, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5842291152af4977bb66668ff2c0b37f] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=36.0 K 2024-11-21T00:27:55,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:55,291 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting da40b3f40ab942b19e76d09c2fb07eb1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732148869072 2024-11-21T00:27:55,292 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e5c0ee8ab720443385e1b92fb0427c44, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732148870382 2024-11-21T00:27:55,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39227 is added to blk_1073742014_1190 (size=31651) 2024-11-21T00:27:55,296 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121b664345daab04d82b7a2d5fb8995ba6b_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b664345daab04d82b7a2d5fb8995ba6b_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:55,296 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5842291152af4977bb66668ff2c0b37f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732148871822 2024-11-21T00:27:55,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/aaf28f30ea94413c9133d2ee10bc17bc, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:55,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/aaf28f30ea94413c9133d2ee10bc17bc is 175, key is test_row_0/A:col10/1732148872987/Put/seqid=0 2024-11-21T00:27:55,309 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/7878e40473d74b38af7556e2c4436d93 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/7878e40473d74b38af7556e2c4436d93 2024-11-21T00:27:55,318 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/A of 542dd37a0a64b62316f21779fd913b59 into 7878e40473d74b38af7556e2c4436d93(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
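
The flush and compaction work above is happening while writers keep hitting the region's memstore blocking limit, which is why the surrounding records repeat org.apache.hadoop.hbase.RegionTooBusyException thrown from HRegion.checkResources. A minimal, hypothetical client-side retry loop for that situation is sketched below; the table, family, and row names are taken from this log, the backoff values are arbitrary, and in practice the HBase client performs similar retries internally and may surface the failure wrapped in a RetriesExhaustedWithDetailsException instead.

    // Hypothetical sketch: retry a Put that is rejected with RegionTooBusyException
    // while flushes and compactions catch up. Backoff values are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BusyRegionRetry {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 10; attempt++) {
                    try {
                        table.put(put);
                        return; // write accepted
                    } catch (RegionTooBusyException busy) {
                        // Region memstore is over its blocking limit; wait for a flush, then retry.
                        Thread.sleep(backoffMs);
                        backoffMs = Math.min(backoffMs * 2, 5000);
                    }
                }
            }
        }
    }
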
2024-11-21T00:27:55,318 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:55,318 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/A, priority=13, startTime=1732148874808; duration=0sec 2024-11-21T00:27:55,318 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:55,318 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:A 2024-11-21T00:27:55,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742015_1191 (size=31105) 2024-11-21T00:27:55,333 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#C#compaction#156 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:55,334 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/2ecbd1f205bc4fc389d54b730941c090 is 50, key is test_row_0/C:col10/1732148872976/Put/seqid=0 2024-11-21T00:27:55,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742016_1192 (size=12697) 2024-11-21T00:27:55,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148935456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148935456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148935458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148935458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,729 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/aaf28f30ea94413c9133d2ee10bc17bc 2024-11-21T00:27:55,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/1addba63d17844b784a9bd26d49732e2 is 50, key is test_row_0/B:col10/1732148872987/Put/seqid=0 2024-11-21T00:27:55,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742017_1193 (size=12151) 2024-11-21T00:27:55,762 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/1addba63d17844b784a9bd26d49732e2 2024-11-21T00:27:55,766 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/2ecbd1f205bc4fc389d54b730941c090 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/2ecbd1f205bc4fc389d54b730941c090 2024-11-21T00:27:55,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148935767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148935768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148935769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148935767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:55,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/396ff94f7a5042cc8f2bbd60b2667a20 is 50, key is test_row_0/C:col10/1732148872987/Put/seqid=0 2024-11-21T00:27:55,795 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/C of 542dd37a0a64b62316f21779fd913b59 into 2ecbd1f205bc4fc389d54b730941c090(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:55,795 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:55,795 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/C, priority=13, startTime=1732148874809; duration=0sec 2024-11-21T00:27:55,795 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:55,796 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:C 2024-11-21T00:27:55,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742018_1194 (size=12151) 2024-11-21T00:27:55,821 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/396ff94f7a5042cc8f2bbd60b2667a20 2024-11-21T00:27:55,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/aaf28f30ea94413c9133d2ee10bc17bc as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/aaf28f30ea94413c9133d2ee10bc17bc 2024-11-21T00:27:55,836 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/aaf28f30ea94413c9133d2ee10bc17bc, entries=150, sequenceid=252, filesize=30.4 K 2024-11-21T00:27:55,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/1addba63d17844b784a9bd26d49732e2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1addba63d17844b784a9bd26d49732e2 2024-11-21T00:27:55,843 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1addba63d17844b784a9bd26d49732e2, entries=150, sequenceid=252, filesize=11.9 K 2024-11-21T00:27:55,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/396ff94f7a5042cc8f2bbd60b2667a20 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/396ff94f7a5042cc8f2bbd60b2667a20 2024-11-21T00:27:55,861 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/396ff94f7a5042cc8f2bbd60b2667a20, entries=150, sequenceid=252, filesize=11.9 K 2024-11-21T00:27:55,862 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 542dd37a0a64b62316f21779fd913b59 in 985ms, sequenceid=252, compaction requested=false 2024-11-21T00:27:55,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:55,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
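
The flush that just completed (sequenceid=252, ~60 KB of data written across stores A, B and C) is what eventually lets the blocked writers proceed: HRegion.checkResources rejects new mutations while the region's memstore is above its blocking size, which is generally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The 512.0 K limit reported in these exceptions suggests the test runs with a deliberately tiny flush size; the exact values are not shown in this log. A purely illustrative configuration sketch that would produce a 512 KB blocking limit:

    // Illustrative only: shrink the memstore flush size so a 512 KB blocking limit
    // like the one seen in this log is reached quickly. Values are assumptions,
    // not the settings actually used by TestAcidGuarantees.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class SmallMemstoreConf {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Blocking limit = flush.size * block.multiplier; 128 KB * 4 = 512 KB.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("flush.size = " + conf.get("hbase.hregion.memstore.flush.size"));
        }
    }
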
2024-11-21T00:27:55,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-21T00:27:55,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-21T00:27:55,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-21T00:27:55,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 5.6830 sec 2024-11-21T00:27:55,867 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 5.6890 sec 2024-11-21T00:27:56,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:56,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-21T00:27:56,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:56,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:56,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:56,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411214f3ae2cba6c64a468d9e77646b068d2e_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148876288/Put/seqid=0 2024-11-21T00:27:56,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148936310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148936319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148936319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148936321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742019_1195 (size=14994) 2024-11-21T00:27:56,354 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:56,361 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411214f3ae2cba6c64a468d9e77646b068d2e_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411214f3ae2cba6c64a468d9e77646b068d2e_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:56,362 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/56c6d56f8dca438193a4ff259e8ce445, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:56,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/56c6d56f8dca438193a4ff259e8ce445 is 175, key is test_row_0/A:col10/1732148876288/Put/seqid=0 2024-11-21T00:27:56,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742020_1196 (size=39949) 2024-11-21T00:27:56,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148936422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148936425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148936425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148936425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148936627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148936635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148936634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148936635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,809 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=282, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/56c6d56f8dca438193a4ff259e8ce445 2024-11-21T00:27:56,837 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/1c4a56519394443384c8078de785c734 is 50, key is test_row_0/B:col10/1732148876288/Put/seqid=0 2024-11-21T00:27:56,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742021_1197 (size=12301) 2024-11-21T00:27:56,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/1c4a56519394443384c8078de785c734 2024-11-21T00:27:56,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/3c2fe42dfa3c443c9be5141bbd579574 is 50, key is test_row_0/C:col10/1732148876288/Put/seqid=0 2024-11-21T00:27:56,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148936947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148936947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148936955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:56,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148936956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:56,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742022_1198 (size=12301) 2024-11-21T00:27:56,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/3c2fe42dfa3c443c9be5141bbd579574 2024-11-21T00:27:56,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/56c6d56f8dca438193a4ff259e8ce445 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/56c6d56f8dca438193a4ff259e8ce445 2024-11-21T00:27:56,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/56c6d56f8dca438193a4ff259e8ce445, entries=200, sequenceid=282, filesize=39.0 K 2024-11-21T00:27:56,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/1c4a56519394443384c8078de785c734 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1c4a56519394443384c8078de785c734 2024-11-21T00:27:56,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1c4a56519394443384c8078de785c734, entries=150, sequenceid=282, filesize=12.0 K 2024-11-21T00:27:56,997 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/3c2fe42dfa3c443c9be5141bbd579574 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3c2fe42dfa3c443c9be5141bbd579574 2024-11-21T00:27:57,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3c2fe42dfa3c443c9be5141bbd579574, entries=150, sequenceid=282, filesize=12.0 K 2024-11-21T00:27:57,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 542dd37a0a64b62316f21779fd913b59 in 723ms, sequenceid=282, compaction requested=true 2024-11-21T00:27:57,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:57,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:27:57,012 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:57,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:57,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:27:57,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:57,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:27:57,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:27:57,014 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:57,016 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:57,016 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/A is initiating minor compaction (all files) 2024-11-21T00:27:57,016 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/A in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:57,017 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/7878e40473d74b38af7556e2c4436d93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/aaf28f30ea94413c9133d2ee10bc17bc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/56c6d56f8dca438193a4ff259e8ce445] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=100.3 K 2024-11-21T00:27:57,017 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:57,017 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/7878e40473d74b38af7556e2c4436d93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/aaf28f30ea94413c9133d2ee10bc17bc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/56c6d56f8dca438193a4ff259e8ce445] 2024-11-21T00:27:57,017 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:57,017 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/B is initiating minor compaction (all files) 2024-11-21T00:27:57,017 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/B in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:57,017 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e9a309befa3e4ee69e083f235f84d5df, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1addba63d17844b784a9bd26d49732e2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1c4a56519394443384c8078de785c734] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=36.3 K 2024-11-21T00:27:57,018 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7878e40473d74b38af7556e2c4436d93, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732148871822 2024-11-21T00:27:57,019 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e9a309befa3e4ee69e083f235f84d5df, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732148871822 2024-11-21T00:27:57,019 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting aaf28f30ea94413c9133d2ee10bc17bc, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732148872987 2024-11-21T00:27:57,019 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1addba63d17844b784a9bd26d49732e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732148872987 2024-11-21T00:27:57,019 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56c6d56f8dca438193a4ff259e8ce445, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732148875143 2024-11-21T00:27:57,020 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c4a56519394443384c8078de785c734, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732148875143 2024-11-21T00:27:57,039 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#B#compaction#162 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:57,039 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/2814a2fed81f40dfad962ffac14f7f12 is 50, key is test_row_0/B:col10/1732148876288/Put/seqid=0 2024-11-21T00:27:57,045 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:57,055 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411212a7bb95f535f4bc480ecc8804a45a2c6_542dd37a0a64b62316f21779fd913b59 store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:57,057 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411212a7bb95f535f4bc480ecc8804a45a2c6_542dd37a0a64b62316f21779fd913b59, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:57,057 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411212a7bb95f535f4bc480ecc8804a45a2c6_542dd37a0a64b62316f21779fd913b59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:57,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742023_1199 (size=12949) 2024-11-21T00:27:57,114 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/2814a2fed81f40dfad962ffac14f7f12 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/2814a2fed81f40dfad962ffac14f7f12 2024-11-21T00:27:57,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742024_1200 (size=4469) 2024-11-21T00:27:57,148 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#A#compaction#163 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:57,148 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/39ea7f2240744faf8d1ab30e596e203f is 175, key is test_row_0/A:col10/1732148876288/Put/seqid=0 2024-11-21T00:27:57,157 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/B of 542dd37a0a64b62316f21779fd913b59 into 2814a2fed81f40dfad962ffac14f7f12(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:57,157 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:57,157 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/B, priority=13, startTime=1732148877012; duration=0sec 2024-11-21T00:27:57,157 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:27:57,157 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:B 2024-11-21T00:27:57,157 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:27:57,160 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:27:57,160 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/C is initiating minor compaction (all files) 2024-11-21T00:27:57,160 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/C in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:57,160 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/2ecbd1f205bc4fc389d54b730941c090, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/396ff94f7a5042cc8f2bbd60b2667a20, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3c2fe42dfa3c443c9be5141bbd579574] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=36.3 K 2024-11-21T00:27:57,162 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ecbd1f205bc4fc389d54b730941c090, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732148871822 2024-11-21T00:27:57,163 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 396ff94f7a5042cc8f2bbd60b2667a20, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732148872987 2024-11-21T00:27:57,163 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c2fe42dfa3c443c9be5141bbd579574, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732148875143 2024-11-21T00:27:57,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742025_1201 (size=31903) 2024-11-21T00:27:57,230 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#C#compaction#164 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:27:57,231 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/5de301cc35c446c68e877d627367ef6f is 50, key is test_row_0/C:col10/1732148876288/Put/seqid=0 2024-11-21T00:27:57,297 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T00:27:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742026_1202 (size=12949) 2024-11-21T00:27:57,361 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/5de301cc35c446c68e877d627367ef6f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5de301cc35c446c68e877d627367ef6f 2024-11-21T00:27:57,373 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/C of 542dd37a0a64b62316f21779fd913b59 into 5de301cc35c446c68e877d627367ef6f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:57,373 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:57,373 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/C, priority=13, startTime=1732148877012; duration=0sec 2024-11-21T00:27:57,375 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:57,375 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:C 2024-11-21T00:27:57,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:57,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:27:57,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:57,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:57,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:57,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:57,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:57,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:57,553 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121f3a80ef083974be08edb9a7a2ca50ff5_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148877474/Put/seqid=0 
2024-11-21T00:27:57,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742027_1203 (size=12454) 2024-11-21T00:27:57,633 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/39ea7f2240744faf8d1ab30e596e203f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/39ea7f2240744faf8d1ab30e596e203f 2024-11-21T00:27:57,663 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/A of 542dd37a0a64b62316f21779fd913b59 into 39ea7f2240744faf8d1ab30e596e203f(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:27:57,663 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:57,663 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/A, priority=13, startTime=1732148877012; duration=0sec 2024-11-21T00:27:57,663 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:27:57,663 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:A 2024-11-21T00:27:57,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:57,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148937668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:57,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:57,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148937676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:57,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:57,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148937677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:57,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:57,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148937682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:57,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:57,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148937786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:57,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:57,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148937795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:57,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:57,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148937795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:57,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:57,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148937801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:57,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:57,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148937991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148937998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148938004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148938011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,031 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:58,066 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121f3a80ef083974be08edb9a7a2ca50ff5_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f3a80ef083974be08edb9a7a2ca50ff5_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:58,075 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/53e46d664c3c41e0a5ec7f11e1dfb072, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:58,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/53e46d664c3c41e0a5ec7f11e1dfb072 is 175, key is test_row_0/A:col10/1732148877474/Put/seqid=0 2024-11-21T00:27:58,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742028_1204 (size=31255) 2024-11-21T00:27:58,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-21T00:27:58,290 INFO [Thread-621 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-21T00:27:58,291 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:27:58,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-21T00:27:58,293 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:27:58,295 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:27:58,295 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:27:58,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-21T00:27:58,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148938304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148938308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148938313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148938316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-21T00:27:58,449 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-21T00:27:58,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:58,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:58,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:58,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:58,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:58,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:58,543 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=297, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/53e46d664c3c41e0a5ec7f11e1dfb072 2024-11-21T00:27:58,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/e8060729625843ac8d3341e39594f0b4 is 50, key is test_row_0/B:col10/1732148877474/Put/seqid=0 2024-11-21T00:27:58,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-21T00:27:58,604 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-21T00:27:58,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:58,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:58,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:58,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:58,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:58,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:58,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742029_1205 (size=12301) 2024-11-21T00:27:58,612 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/e8060729625843ac8d3341e39594f0b4 2024-11-21T00:27:58,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/536638babbf44736a52858b6717232d5 is 50, key is test_row_0/C:col10/1732148877474/Put/seqid=0 2024-11-21T00:27:58,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742030_1206 (size=12301) 2024-11-21T00:27:58,759 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-21T00:27:58,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:58,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:58,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:58,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:58,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:58,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:58,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148938815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148938819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148938825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:27:58,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148938829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-21T00:27:58,911 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:58,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-21T00:27:58,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:58,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:58,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:58,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:58,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:58,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:59,065 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:59,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-21T00:27:59,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:59,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:27:59,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:59,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:59,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:27:59,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:27:59,095 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/536638babbf44736a52858b6717232d5 2024-11-21T00:27:59,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/53e46d664c3c41e0a5ec7f11e1dfb072 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/53e46d664c3c41e0a5ec7f11e1dfb072 2024-11-21T00:27:59,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/53e46d664c3c41e0a5ec7f11e1dfb072, entries=150, sequenceid=297, filesize=30.5 K 2024-11-21T00:27:59,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/e8060729625843ac8d3341e39594f0b4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e8060729625843ac8d3341e39594f0b4 2024-11-21T00:27:59,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e8060729625843ac8d3341e39594f0b4, entries=150, sequenceid=297, filesize=12.0 K 2024-11-21T00:27:59,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/536638babbf44736a52858b6717232d5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/536638babbf44736a52858b6717232d5 2024-11-21T00:27:59,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/536638babbf44736a52858b6717232d5, entries=150, sequenceid=297, filesize=12.0 K 2024-11-21T00:27:59,134 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 542dd37a0a64b62316f21779fd913b59 in 1658ms, sequenceid=297, compaction requested=false 2024-11-21T00:27:59,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:59,218 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:27:59,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=56 2024-11-21T00:27:59,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:27:59,220 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-21T00:27:59,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:59,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:59,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:59,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:59,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:59,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112127dc479d98294b38b807cb08fca9bc49_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148877661/Put/seqid=0 2024-11-21T00:27:59,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742031_1207 (size=12454) 2024-11-21T00:27:59,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,301 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112127dc479d98294b38b807cb08fca9bc49_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112127dc479d98294b38b807cb08fca9bc49_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:59,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/1dc075bc7648486b920a9497d7a42c4c, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:27:59,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/1dc075bc7648486b920a9497d7a42c4c is 175, key is test_row_0/A:col10/1732148877661/Put/seqid=0 2024-11-21T00:27:59,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742032_1208 (size=31255) 2024-11-21T00:27:59,342 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=322, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/1dc075bc7648486b920a9497d7a42c4c 2024-11-21T00:27:59,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/926a85b4ba184b5e9f59f374b3f780fe is 50, key is test_row_0/B:col10/1732148877661/Put/seqid=0 2024-11-21T00:27:59,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742033_1209 (size=12301) 2024-11-21T00:27:59,401 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/926a85b4ba184b5e9f59f374b3f780fe 2024-11-21T00:27:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-21T00:27:59,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/3fa51b784062440ab4965dd0607f6ebe is 50, key is test_row_0/C:col10/1732148877661/Put/seqid=0 2024-11-21T00:27:59,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742034_1210 (size=12301) 2024-11-21T00:27:59,475 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/3fa51b784062440ab4965dd0607f6ebe 2024-11-21T00:27:59,501 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/1dc075bc7648486b920a9497d7a42c4c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1dc075bc7648486b920a9497d7a42c4c 2024-11-21T00:27:59,513 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1dc075bc7648486b920a9497d7a42c4c, entries=150, sequenceid=322, filesize=30.5 K 2024-11-21T00:27:59,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/926a85b4ba184b5e9f59f374b3f780fe as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/926a85b4ba184b5e9f59f374b3f780fe 2024-11-21T00:27:59,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,531 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/926a85b4ba184b5e9f59f374b3f780fe, entries=150, sequenceid=322, filesize=12.0 K 2024-11-21T00:27:59,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/3fa51b784062440ab4965dd0607f6ebe as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3fa51b784062440ab4965dd0607f6ebe 2024-11-21T00:27:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,544 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3fa51b784062440ab4965dd0607f6ebe, entries=150, sequenceid=322, filesize=12.0 K 2024-11-21T00:27:59,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,553 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 542dd37a0a64b62316f21779fd913b59 in 333ms, sequenceid=322, compaction requested=true 2024-11-21T00:27:59,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:27:59,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:27:59,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-21T00:27:59,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-21T00:27:59,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,557 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-21T00:27:59,557 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2590 sec 2024-11-21T00:27:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.2660 sec 2024-11-21T00:27:59,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:27:59,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 
2024-11-21T00:27:59,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:27:59,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:59,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:27:59,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:59,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:27:59,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:27:59,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112141e7ecce3ec94269bf0769350f135db1_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148879937/Put/seqid=0 2024-11-21T00:28:00,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742035_1211 (size=17534) 2024-11-21T00:28:00,013 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:00,022 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112141e7ecce3ec94269bf0769350f135db1_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112141e7ecce3ec94269bf0769350f135db1_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:00,027 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:28:00,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946 is 175, key is test_row_0/A:col10/1732148879937/Put/seqid=0 2024-11-21T00:28:00,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742036_1212 (size=48639) 2024-11-21T00:28:00,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148940026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148940067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148940072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148940072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148940169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148940175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148940176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148940176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148940375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148940380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148940381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148940381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-21T00:28:00,412 INFO [Thread-621 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-21T00:28:00,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:00,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-21T00:28:00,415 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:00,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-21T00:28:00,424 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:00,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:00,452 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=334, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946 2024-11-21T00:28:00,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/ce4b2fd86b7a416d831be81a9cb3aae4 is 50, key is test_row_0/B:col10/1732148879937/Put/seqid=0 2024-11-21T00:28:00,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742037_1213 (size=12301) 
2024-11-21T00:28:00,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/ce4b2fd86b7a416d831be81a9cb3aae4 2024-11-21T00:28:00,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-21T00:28:00,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/321fcf2da1ad4843a1522d85c29b0e5a is 50, key is test_row_0/C:col10/1732148879937/Put/seqid=0 2024-11-21T00:28:00,577 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-21T00:28:00,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:00,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:28:00,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:00,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:00,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:00,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:00,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742038_1214 (size=12301) 2024-11-21T00:28:00,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/321fcf2da1ad4843a1522d85c29b0e5a 2024-11-21T00:28:00,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946 2024-11-21T00:28:00,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946, entries=250, sequenceid=334, filesize=47.5 K 2024-11-21T00:28:00,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/ce4b2fd86b7a416d831be81a9cb3aae4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/ce4b2fd86b7a416d831be81a9cb3aae4 2024-11-21T00:28:00,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/ce4b2fd86b7a416d831be81a9cb3aae4, entries=150, sequenceid=334, filesize=12.0 K 2024-11-21T00:28:00,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/321fcf2da1ad4843a1522d85c29b0e5a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/321fcf2da1ad4843a1522d85c29b0e5a 2024-11-21T00:28:00,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/321fcf2da1ad4843a1522d85c29b0e5a, entries=150, sequenceid=334, filesize=12.0 K 2024-11-21T00:28:00,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 542dd37a0a64b62316f21779fd913b59 in 653ms, sequenceid=334, compaction requested=true 2024-11-21T00:28:00,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:28:00,608 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:00,609 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143052 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:00,609 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/A is initiating minor compaction (all files) 2024-11-21T00:28:00,610 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/A in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:00,610 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/39ea7f2240744faf8d1ab30e596e203f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/53e46d664c3c41e0a5ec7f11e1dfb072, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1dc075bc7648486b920a9497d7a42c4c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=139.7 K 2024-11-21T00:28:00,610 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:00,610 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/39ea7f2240744faf8d1ab30e596e203f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/53e46d664c3c41e0a5ec7f11e1dfb072, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1dc075bc7648486b920a9497d7a42c4c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946] 2024-11-21T00:28:00,610 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39ea7f2240744faf8d1ab30e596e203f, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732148875143 2024-11-21T00:28:00,611 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53e46d664c3c41e0a5ec7f11e1dfb072, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732148877461 2024-11-21T00:28:00,611 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dc075bc7648486b920a9497d7a42c4c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732148877643 2024-11-21T00:28:00,611 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae4cc8c9e72b4f8cb9d40b0c2ce8b946, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732148879935 2024-11-21T00:28:00,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:00,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:00,612 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:00,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:00,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:00,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 542dd37a0a64b62316f21779fd913b59:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:00,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:00,618 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 
2024-11-21T00:28:00,618 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/B is initiating minor compaction (all files) 2024-11-21T00:28:00,618 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/B in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:00,618 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/2814a2fed81f40dfad962ffac14f7f12, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e8060729625843ac8d3341e39594f0b4, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/926a85b4ba184b5e9f59f374b3f780fe, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/ce4b2fd86b7a416d831be81a9cb3aae4] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=48.7 K 2024-11-21T00:28:00,619 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 2814a2fed81f40dfad962ffac14f7f12, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732148875143 2024-11-21T00:28:00,619 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e8060729625843ac8d3341e39594f0b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732148877461 2024-11-21T00:28:00,620 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 926a85b4ba184b5e9f59f374b3f780fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732148877643 2024-11-21T00:28:00,620 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ce4b2fd86b7a416d831be81a9cb3aae4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732148879937 2024-11-21T00:28:00,636 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:28:00,645 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#B#compaction#175 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:00,646 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/295e87847b0f4b6f8a57fbadb55eb0df is 50, key is test_row_0/B:col10/1732148879937/Put/seqid=0 2024-11-21T00:28:00,662 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121b5a7695ffc044443837e3cda621e677e_542dd37a0a64b62316f21779fd913b59 store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:28:00,665 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121b5a7695ffc044443837e3cda621e677e_542dd37a0a64b62316f21779fd913b59, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:28:00,665 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121b5a7695ffc044443837e3cda621e677e_542dd37a0a64b62316f21779fd913b59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:28:00,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:00,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-21T00:28:00,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:28:00,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:00,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:28:00,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:00,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:28:00,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:00,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742039_1215 (size=13085) 2024-11-21T00:28:00,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148940712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148940713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148940716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-21T00:28:00,730 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742040_1216 (size=4469) 2024-11-21T00:28:00,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148940724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,740 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#A#compaction#174 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:00,741 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/32fcfe08a9be433c84af65dae176cf95 is 175, key is test_row_0/A:col10/1732148879937/Put/seqid=0 2024-11-21T00:28:00,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-21T00:28:00,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:00,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:28:00,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:00,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:00,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:00,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:00,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411217cb732c89e7a4e22bd938b30fc4f230d_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148880009/Put/seqid=0 2024-11-21T00:28:00,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742041_1217 (size=32039) 2024-11-21T00:28:00,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148940818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148940818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,825 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/32fcfe08a9be433c84af65dae176cf95 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/32fcfe08a9be433c84af65dae176cf95 2024-11-21T00:28:00,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742042_1218 (size=14994) 2024-11-21T00:28:00,827 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:00,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148940823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,836 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411217cb732c89e7a4e22bd938b30fc4f230d_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411217cb732c89e7a4e22bd938b30fc4f230d_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:00,838 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/A of 542dd37a0a64b62316f21779fd913b59 into 32fcfe08a9be433c84af65dae176cf95(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:00,838 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/c71c4e5201a64e47a375559049ccdf2e, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:28:00,838 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:28:00,838 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/A, priority=12, startTime=1732148880608; duration=0sec 2024-11-21T00:28:00,838 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:00,838 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:A 2024-11-21T00:28:00,838 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:00,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/c71c4e5201a64e47a375559049ccdf2e is 175, key is test_row_0/A:col10/1732148880009/Put/seqid=0 2024-11-21T00:28:00,841 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:00,841 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 542dd37a0a64b62316f21779fd913b59/C is initiating minor compaction (all files) 2024-11-21T00:28:00,841 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 542dd37a0a64b62316f21779fd913b59/C in TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:28:00,841 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5de301cc35c446c68e877d627367ef6f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/536638babbf44736a52858b6717232d5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3fa51b784062440ab4965dd0607f6ebe, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/321fcf2da1ad4843a1522d85c29b0e5a] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp, totalSize=48.7 K 2024-11-21T00:28:00,842 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5de301cc35c446c68e877d627367ef6f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732148875143 2024-11-21T00:28:00,842 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 536638babbf44736a52858b6717232d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732148877461 2024-11-21T00:28:00,843 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fa51b784062440ab4965dd0607f6ebe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732148877643 2024-11-21T00:28:00,843 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 321fcf2da1ad4843a1522d85c29b0e5a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732148879937 2024-11-21T00:28:00,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:00,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148940840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742043_1219 (size=39949) 2024-11-21T00:28:00,867 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 542dd37a0a64b62316f21779fd913b59#C#compaction#177 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:00,868 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/6ce6e9fd28244b7c9e2d5920bbfad7e4 is 50, key is test_row_0/C:col10/1732148879937/Put/seqid=0 2024-11-21T00:28:00,893 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:00,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-21T00:28:00,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:00,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:28:00,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:00,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:00,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:00,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:00,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742044_1220 (size=13085) 2024-11-21T00:28:01,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-21T00:28:01,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148941022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148941030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148941023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,046 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-21T00:28:01,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:28:01,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148941048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:01,128 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/295e87847b0f4b6f8a57fbadb55eb0df as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/295e87847b0f4b6f8a57fbadb55eb0df 2024-11-21T00:28:01,140 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/B of 542dd37a0a64b62316f21779fd913b59 into 295e87847b0f4b6f8a57fbadb55eb0df(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:01,140 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:28:01,140 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/B, priority=12, startTime=1732148880611; duration=0sec 2024-11-21T00:28:01,142 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:01,142 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:B 2024-11-21T00:28:01,202 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-21T00:28:01,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:28:01,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,270 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=361, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/c71c4e5201a64e47a375559049ccdf2e 2024-11-21T00:28:01,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/c06c911f410e4aa29d607ac78443355c is 50, key is test_row_0/B:col10/1732148880009/Put/seqid=0 2024-11-21T00:28:01,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148941331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148941343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148941343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742045_1221 (size=12301) 2024-11-21T00:28:01,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/c06c911f410e4aa29d607ac78443355c 2024-11-21T00:28:01,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148941352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,422 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/6ce6e9fd28244b7c9e2d5920bbfad7e4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/6ce6e9fd28244b7c9e2d5920bbfad7e4 2024-11-21T00:28:01,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-21T00:28:01,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:28:01,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,431 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,450 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/037594ef6d99426c98362f82cc2c94cd is 50, key is test_row_0/C:col10/1732148880009/Put/seqid=0 2024-11-21T00:28:01,454 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 542dd37a0a64b62316f21779fd913b59/C of 542dd37a0a64b62316f21779fd913b59 into 6ce6e9fd28244b7c9e2d5920bbfad7e4(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:01,454 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:28:01,454 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59., storeName=542dd37a0a64b62316f21779fd913b59/C, priority=12, startTime=1732148880612; duration=0sec 2024-11-21T00:28:01,454 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:01,454 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 542dd37a0a64b62316f21779fd913b59:C 2024-11-21T00:28:01,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742046_1222 (size=12301) 2024-11-21T00:28:01,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-21T00:28:01,586 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-21T00:28:01,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
as already flushing 2024-11-21T00:28:01,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,730 DEBUG [Thread-622 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3ec46f90 to 127.0.0.1:64241 2024-11-21T00:28:01,730 DEBUG [Thread-622 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:01,732 DEBUG [Thread-624 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7f63b68c to 127.0.0.1:64241 2024-11-21T00:28:01,732 DEBUG [Thread-624 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:01,734 DEBUG [Thread-626 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x473f181f to 127.0.0.1:64241 2024-11-21T00:28:01,735 DEBUG [Thread-626 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:01,736 DEBUG [Thread-628 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x768577a2 to 127.0.0.1:64241 2024-11-21T00:28:01,736 DEBUG [Thread-628 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:01,743 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,747 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-21T00:28:01,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
as already flushing 2024-11-21T00:28:01,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,747 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47778 deadline: 1732148941846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47762 deadline: 1732148941853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47722 deadline: 1732148941854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:01,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47748 deadline: 1732148941863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/037594ef6d99426c98362f82cc2c94cd 2024-11-21T00:28:01,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/c71c4e5201a64e47a375559049ccdf2e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/c71c4e5201a64e47a375559049ccdf2e 2024-11-21T00:28:01,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/c71c4e5201a64e47a375559049ccdf2e, entries=200, sequenceid=361, filesize=39.0 K 2024-11-21T00:28:01,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/c06c911f410e4aa29d607ac78443355c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/c06c911f410e4aa29d607ac78443355c 2024-11-21T00:28:01,900 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:01,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-21T00:28:01,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
as already flushing 2024-11-21T00:28:01,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:01,901 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:01,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/c06c911f410e4aa29d607ac78443355c, entries=150, sequenceid=361, filesize=12.0 K 2024-11-21T00:28:01,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/037594ef6d99426c98362f82cc2c94cd as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/037594ef6d99426c98362f82cc2c94cd 2024-11-21T00:28:01,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/037594ef6d99426c98362f82cc2c94cd, entries=150, sequenceid=361, filesize=12.0 K 2024-11-21T00:28:01,914 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 542dd37a0a64b62316f21779fd913b59 in 1223ms, sequenceid=361, compaction requested=false 2024-11-21T00:28:01,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:28:02,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:02,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): 
Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-21T00:28:02,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:02,053 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-21T00:28:02,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:28:02,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:02,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:28:02,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:02,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:28:02,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:02,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121d5fb034239964197ba44467164825c56_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148880715/Put/seqid=0 2024-11-21T00:28:02,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742047_1223 (size=12454) 2024-11-21T00:28:02,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:02,469 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121d5fb034239964197ba44467164825c56_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d5fb034239964197ba44467164825c56_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:02,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/9ab2e07f693d4411be85a330172b8e82, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:28:02,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/9ab2e07f693d4411be85a330172b8e82 is 175, key is test_row_0/A:col10/1732148880715/Put/seqid=0 2024-11-21T00:28:02,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742048_1224 (size=31255) 2024-11-21T00:28:02,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-21T00:28:02,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. as already flushing 2024-11-21T00:28:02,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:02,855 DEBUG [Thread-613 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53af6163 to 127.0.0.1:64241 2024-11-21T00:28:02,855 DEBUG [Thread-613 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:02,858 DEBUG [Thread-617 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32168855 to 127.0.0.1:64241 2024-11-21T00:28:02,858 DEBUG [Thread-617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:02,877 DEBUG [Thread-615 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x15736fcc to 127.0.0.1:64241 2024-11-21T00:28:02,877 DEBUG [Thread-615 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:02,878 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=374, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/9ab2e07f693d4411be85a330172b8e82 2024-11-21T00:28:02,878 DEBUG [Thread-619 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40832d66 to 127.0.0.1:64241 2024-11-21T00:28:02,878 DEBUG [Thread-619 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:02,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/9b00040defbf478a83d2d774327cedff is 50, key is test_row_0/B:col10/1732148880715/Put/seqid=0 2024-11-21T00:28:02,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742049_1225 (size=12301) 2024-11-21T00:28:03,297 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=15.65 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/9b00040defbf478a83d2d774327cedff 2024-11-21T00:28:03,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/b9bbc9664e4048a09f16c7897a7f671c is 50, key is test_row_0/C:col10/1732148880715/Put/seqid=0 2024-11-21T00:28:03,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742050_1226 (size=12301) 2024-11-21T00:28:03,322 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/b9bbc9664e4048a09f16c7897a7f671c 2024-11-21T00:28:03,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/9ab2e07f693d4411be85a330172b8e82 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9ab2e07f693d4411be85a330172b8e82 2024-11-21T00:28:03,334 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9ab2e07f693d4411be85a330172b8e82, entries=150, sequenceid=374, filesize=30.5 K 2024-11-21T00:28:03,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/9b00040defbf478a83d2d774327cedff as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/9b00040defbf478a83d2d774327cedff 2024-11-21T00:28:03,343 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/9b00040defbf478a83d2d774327cedff, entries=150, sequenceid=374, filesize=12.0 K 2024-11-21T00:28:03,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/b9bbc9664e4048a09f16c7897a7f671c as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/b9bbc9664e4048a09f16c7897a7f671c 2024-11-21T00:28:03,350 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/b9bbc9664e4048a09f16c7897a7f671c, entries=150, sequenceid=374, filesize=12.0 K 2024-11-21T00:28:03,351 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=26.84 KB/27480 for 542dd37a0a64b62316f21779fd913b59 in 1298ms, sequenceid=374, compaction requested=true 2024-11-21T00:28:03,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:28:03,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:03,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-21T00:28:03,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-21T00:28:03,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-21T00:28:03,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9290 sec 2024-11-21T00:28:03,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 2.9450 sec 2024-11-21T00:28:03,902 DEBUG [Thread-611 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2aa409d0 to 127.0.0.1:64241 2024-11-21T00:28:03,903 DEBUG [Thread-611 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:04,224 DEBUG [master/0e7930017ff8:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 3572e63abe2094af1c626c3e96fc06ec changed from -1.0 to 0.0, refreshing cache 2024-11-21T00:28:04,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-21T00:28:04,529 INFO [Thread-621 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2732 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2605 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1131 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3393 rows 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1143 2024-11-21T00:28:04,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3429 rows 2024-11-21T00:28:04,529 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-21T00:28:04,529 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3242ee55 to 127.0.0.1:64241 2024-11-21T00:28:04,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:04,533 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-21T00:28:04,533 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-21T00:28:04,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:04,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-21T00:28:04,538 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148884537"}]},"ts":"1732148884537"} 2024-11-21T00:28:04,539 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-21T00:28:04,548 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-21T00:28:04,549 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-21T00:28:04,551 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, UNASSIGN}] 2024-11-21T00:28:04,552 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, UNASSIGN 2024-11-21T00:28:04,552 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=542dd37a0a64b62316f21779fd913b59, regionState=CLOSING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:04,554 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T00:28:04,554 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; CloseRegionProcedure 542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:28:04,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-21T00:28:04,706 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:04,707 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(124): Close 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:04,707 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-21T00:28:04,707 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1681): Closing 542dd37a0a64b62316f21779fd913b59, disabling compactions & flushes 2024-11-21T00:28:04,707 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:04,707 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 2024-11-21T00:28:04,708 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. after waiting 0 ms 2024-11-21T00:28:04,708 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
2024-11-21T00:28:04,708 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(2837): Flushing 542dd37a0a64b62316f21779fd913b59 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-21T00:28:04,708 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=A 2024-11-21T00:28:04,708 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:04,708 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=B 2024-11-21T00:28:04,708 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:04,708 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 542dd37a0a64b62316f21779fd913b59, store=C 2024-11-21T00:28:04,708 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:04,729 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121179c9f731af94521ae6d53b959f93774_542dd37a0a64b62316f21779fd913b59 is 50, key is test_row_0/A:col10/1732148882864/Put/seqid=0 2024-11-21T00:28:04,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742051_1227 (size=12454) 2024-11-21T00:28:04,771 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:04,779 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121179c9f731af94521ae6d53b959f93774_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121179c9f731af94521ae6d53b959f93774_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:04,782 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/0cce5e2c5d154f10be7f61a850ab7199, store: [table=TestAcidGuarantees family=A region=542dd37a0a64b62316f21779fd913b59] 2024-11-21T00:28:04,782 DEBUG 
[RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/0cce5e2c5d154f10be7f61a850ab7199 is 175, key is test_row_0/A:col10/1732148882864/Put/seqid=0 2024-11-21T00:28:04,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742052_1228 (size=31255) 2024-11-21T00:28:04,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-21T00:28:05,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-21T00:28:05,197 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=382, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/0cce5e2c5d154f10be7f61a850ab7199 2024-11-21T00:28:05,205 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/663b196353174d50b4f46f9dd1dd8959 is 50, key is test_row_0/B:col10/1732148882864/Put/seqid=0 2024-11-21T00:28:05,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742053_1229 (size=12301) 2024-11-21T00:28:05,610 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/663b196353174d50b4f46f9dd1dd8959 2024-11-21T00:28:05,618 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/a251f683ca864955a6b911ff1ce42aa4 is 50, key is test_row_0/C:col10/1732148882864/Put/seqid=0 2024-11-21T00:28:05,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742054_1230 (size=12301) 2024-11-21T00:28:05,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-21T00:28:06,023 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/a251f683ca864955a6b911ff1ce42aa4 2024-11-21T00:28:06,035 
DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/A/0cce5e2c5d154f10be7f61a850ab7199 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/0cce5e2c5d154f10be7f61a850ab7199 2024-11-21T00:28:06,056 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/0cce5e2c5d154f10be7f61a850ab7199, entries=150, sequenceid=382, filesize=30.5 K 2024-11-21T00:28:06,058 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/B/663b196353174d50b4f46f9dd1dd8959 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/663b196353174d50b4f46f9dd1dd8959 2024-11-21T00:28:06,064 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/663b196353174d50b4f46f9dd1dd8959, entries=150, sequenceid=382, filesize=12.0 K 2024-11-21T00:28:06,065 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/.tmp/C/a251f683ca864955a6b911ff1ce42aa4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/a251f683ca864955a6b911ff1ce42aa4 2024-11-21T00:28:06,072 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/a251f683ca864955a6b911ff1ce42aa4, entries=150, sequenceid=382, filesize=12.0 K 2024-11-21T00:28:06,075 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 542dd37a0a64b62316f21779fd913b59 in 1367ms, sequenceid=382, compaction requested=true 2024-11-21T00:28:06,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/55a444baf680485482422cb0c1de086f, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2590078960ad43fa9121715b9b0b3d6a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/50f1a37e61114c1a9fe30888508c4274, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/498f1a6a418c4fef894b7e74f70419c8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/99c13de71d444780b42b7223c7944b0d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/a98a8a9a74044679b68709371f8dbc04, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6eb5ec83f3da4e7cb08bab3593439c34, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2719ec3106c849d9a8584401e20d6c46, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6b1f096377c44e5398134c6f5265900e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1247353d74514c26a73abb9b5a9feb7a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ced0aa55a3704e5991e2d78a04fcdbe8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9d57323fe7f2454886aaf5be197e2162, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/22cab42e5cc347c3a16313ab8bed511d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/eca0ab5c971b4676a63ce76cc1674998, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/5bc2b43ec2544be7ac47931692c3b7d5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/7878e40473d74b38af7556e2c4436d93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/3654182756a54384ae60b643e3f41c5f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/aaf28f30ea94413c9133d2ee10bc17bc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/56c6d56f8dca438193a4ff259e8ce445, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/39ea7f2240744faf8d1ab30e596e203f, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/53e46d664c3c41e0a5ec7f11e1dfb072, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1dc075bc7648486b920a9497d7a42c4c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946] to archive 2024-11-21T00:28:06,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:28:06,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/55a444baf680485482422cb0c1de086f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/55a444baf680485482422cb0c1de086f 2024-11-21T00:28:06,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2590078960ad43fa9121715b9b0b3d6a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2590078960ad43fa9121715b9b0b3d6a 2024-11-21T00:28:06,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/50f1a37e61114c1a9fe30888508c4274 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/50f1a37e61114c1a9fe30888508c4274 2024-11-21T00:28:06,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/498f1a6a418c4fef894b7e74f70419c8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/498f1a6a418c4fef894b7e74f70419c8 2024-11-21T00:28:06,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/99c13de71d444780b42b7223c7944b0d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/99c13de71d444780b42b7223c7944b0d 2024-11-21T00:28:06,092 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/a98a8a9a74044679b68709371f8dbc04 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/a98a8a9a74044679b68709371f8dbc04 2024-11-21T00:28:06,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6eb5ec83f3da4e7cb08bab3593439c34 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6eb5ec83f3da4e7cb08bab3593439c34 2024-11-21T00:28:06,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2719ec3106c849d9a8584401e20d6c46 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/2719ec3106c849d9a8584401e20d6c46 2024-11-21T00:28:06,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6b1f096377c44e5398134c6f5265900e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/6b1f096377c44e5398134c6f5265900e 2024-11-21T00:28:06,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1247353d74514c26a73abb9b5a9feb7a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1247353d74514c26a73abb9b5a9feb7a 2024-11-21T00:28:06,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ced0aa55a3704e5991e2d78a04fcdbe8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ced0aa55a3704e5991e2d78a04fcdbe8 2024-11-21T00:28:06,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9d57323fe7f2454886aaf5be197e2162 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9d57323fe7f2454886aaf5be197e2162 2024-11-21T00:28:06,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/22cab42e5cc347c3a16313ab8bed511d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/22cab42e5cc347c3a16313ab8bed511d 2024-11-21T00:28:06,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/eca0ab5c971b4676a63ce76cc1674998 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/eca0ab5c971b4676a63ce76cc1674998 2024-11-21T00:28:06,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/5bc2b43ec2544be7ac47931692c3b7d5 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/5bc2b43ec2544be7ac47931692c3b7d5 2024-11-21T00:28:06,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/7878e40473d74b38af7556e2c4436d93 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/7878e40473d74b38af7556e2c4436d93 2024-11-21T00:28:06,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/3654182756a54384ae60b643e3f41c5f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/3654182756a54384ae60b643e3f41c5f 2024-11-21T00:28:06,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/aaf28f30ea94413c9133d2ee10bc17bc to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/aaf28f30ea94413c9133d2ee10bc17bc 2024-11-21T00:28:06,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/56c6d56f8dca438193a4ff259e8ce445 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/56c6d56f8dca438193a4ff259e8ce445 2024-11-21T00:28:06,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/39ea7f2240744faf8d1ab30e596e203f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/39ea7f2240744faf8d1ab30e596e203f 2024-11-21T00:28:06,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/53e46d664c3c41e0a5ec7f11e1dfb072 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/53e46d664c3c41e0a5ec7f11e1dfb072 2024-11-21T00:28:06,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1dc075bc7648486b920a9497d7a42c4c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/1dc075bc7648486b920a9497d7a42c4c 2024-11-21T00:28:06,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/ae4cc8c9e72b4f8cb9d40b0c2ce8b946 2024-11-21T00:28:06,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/97d964d4b6444daba3de2c5a61e9376d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/b0a4163b98cd4e39a313a2c66967ad0e, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/663bf093484c43fc84b0b499a7f083ad, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/91ffe764c8304f7fae542f8e7abfc295, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/283a4789e80a411ba38a055192cc637c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/62d8e210c62f4ab7b8331fc6510fad29, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/031d306a1ac441d182387f61a7da11b8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e1ab9e9e2d9949aaab8373980ed15f55, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e422160cb4dd46158d3ab4b454ff318b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5b1aa2832ec244e5ba4b85b558a9a85a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5080bbf11838431d824301a4f9d1bd7c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4a0a919615a42dab5beea1ea965655d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1e128ce42c1048e28d3163e2e752d2ce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4453ecc5fe849629548e3f9f7b2fd0b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/f031d1191b954808b94c8f963fa16a00, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e9a309befa3e4ee69e083f235f84d5df, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/bdf551c86a014c0b81573b26be90a2bc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1addba63d17844b784a9bd26d49732e2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/2814a2fed81f40dfad962ffac14f7f12, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1c4a56519394443384c8078de785c734, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e8060729625843ac8d3341e39594f0b4, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/926a85b4ba184b5e9f59f374b3f780fe, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/ce4b2fd86b7a416d831be81a9cb3aae4] to archive 2024-11-21T00:28:06,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:28:06,188 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/97d964d4b6444daba3de2c5a61e9376d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/97d964d4b6444daba3de2c5a61e9376d 2024-11-21T00:28:06,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/b0a4163b98cd4e39a313a2c66967ad0e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/b0a4163b98cd4e39a313a2c66967ad0e 2024-11-21T00:28:06,196 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/663bf093484c43fc84b0b499a7f083ad to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/663bf093484c43fc84b0b499a7f083ad 2024-11-21T00:28:06,199 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/91ffe764c8304f7fae542f8e7abfc295 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/91ffe764c8304f7fae542f8e7abfc295 2024-11-21T00:28:06,202 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/283a4789e80a411ba38a055192cc637c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/283a4789e80a411ba38a055192cc637c 2024-11-21T00:28:06,207 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/62d8e210c62f4ab7b8331fc6510fad29 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/62d8e210c62f4ab7b8331fc6510fad29 2024-11-21T00:28:06,209 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/031d306a1ac441d182387f61a7da11b8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/031d306a1ac441d182387f61a7da11b8 2024-11-21T00:28:06,211 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e1ab9e9e2d9949aaab8373980ed15f55 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e1ab9e9e2d9949aaab8373980ed15f55 2024-11-21T00:28:06,214 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e422160cb4dd46158d3ab4b454ff318b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e422160cb4dd46158d3ab4b454ff318b 2024-11-21T00:28:06,217 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5b1aa2832ec244e5ba4b85b558a9a85a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5b1aa2832ec244e5ba4b85b558a9a85a 2024-11-21T00:28:06,219 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5080bbf11838431d824301a4f9d1bd7c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/5080bbf11838431d824301a4f9d1bd7c 2024-11-21T00:28:06,221 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4a0a919615a42dab5beea1ea965655d to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4a0a919615a42dab5beea1ea965655d 2024-11-21T00:28:06,222 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1e128ce42c1048e28d3163e2e752d2ce to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1e128ce42c1048e28d3163e2e752d2ce 2024-11-21T00:28:06,225 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4453ecc5fe849629548e3f9f7b2fd0b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/a4453ecc5fe849629548e3f9f7b2fd0b 2024-11-21T00:28:06,226 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/f031d1191b954808b94c8f963fa16a00 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/f031d1191b954808b94c8f963fa16a00 2024-11-21T00:28:06,228 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e9a309befa3e4ee69e083f235f84d5df to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e9a309befa3e4ee69e083f235f84d5df 2024-11-21T00:28:06,229 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/bdf551c86a014c0b81573b26be90a2bc to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/bdf551c86a014c0b81573b26be90a2bc 2024-11-21T00:28:06,231 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1addba63d17844b784a9bd26d49732e2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1addba63d17844b784a9bd26d49732e2 2024-11-21T00:28:06,232 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/2814a2fed81f40dfad962ffac14f7f12 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/2814a2fed81f40dfad962ffac14f7f12 2024-11-21T00:28:06,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1c4a56519394443384c8078de785c734 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/1c4a56519394443384c8078de785c734 2024-11-21T00:28:06,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e8060729625843ac8d3341e39594f0b4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/e8060729625843ac8d3341e39594f0b4 2024-11-21T00:28:06,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/926a85b4ba184b5e9f59f374b3f780fe to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/926a85b4ba184b5e9f59f374b3f780fe 2024-11-21T00:28:06,238 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/ce4b2fd86b7a416d831be81a9cb3aae4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/ce4b2fd86b7a416d831be81a9cb3aae4 2024-11-21T00:28:06,240 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/844831a09bbd4e238017df022eba1e93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/9d97f630096345bcb0061f6a4f460eaf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/c33db9fcc1654be2a6cc92cd226b2e8f, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4b6c1bca14274600b7cba7daea0bbc27, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/1d7922d5c1144555b618fbc757e0235b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ed5d78e249b74fbeb6715ee41f8533e5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/426b7bd15e2743c0a49d476680be8417, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/b0ebedb5b2f041c488985ed093313919, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4212bd5e4c9d4b8b98a39c17dd5b43de, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ce5c79421c334e6c86e1038ebbf69962, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/a77764b0073f454cbc9ab26e64172d41, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/01afbfcbe8af47de964dfa65fecb72ba, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/da40b3f40ab942b19e76d09c2fb07eb1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/d3b6dbd5d4644bba9dab5aa6c546645d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/e5c0ee8ab720443385e1b92fb0427c44, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/2ecbd1f205bc4fc389d54b730941c090, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5842291152af4977bb66668ff2c0b37f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/396ff94f7a5042cc8f2bbd60b2667a20, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5de301cc35c446c68e877d627367ef6f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3c2fe42dfa3c443c9be5141bbd579574, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/536638babbf44736a52858b6717232d5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3fa51b784062440ab4965dd0607f6ebe, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/321fcf2da1ad4843a1522d85c29b0e5a] to archive 2024-11-21T00:28:06,242 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:28:06,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/844831a09bbd4e238017df022eba1e93 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/844831a09bbd4e238017df022eba1e93 2024-11-21T00:28:06,249 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/9d97f630096345bcb0061f6a4f460eaf to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/9d97f630096345bcb0061f6a4f460eaf 2024-11-21T00:28:06,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/c33db9fcc1654be2a6cc92cd226b2e8f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/c33db9fcc1654be2a6cc92cd226b2e8f 2024-11-21T00:28:06,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4b6c1bca14274600b7cba7daea0bbc27 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4b6c1bca14274600b7cba7daea0bbc27 2024-11-21T00:28:06,253 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/1d7922d5c1144555b618fbc757e0235b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/1d7922d5c1144555b618fbc757e0235b 2024-11-21T00:28:06,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ed5d78e249b74fbeb6715ee41f8533e5 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ed5d78e249b74fbeb6715ee41f8533e5 2024-11-21T00:28:06,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/426b7bd15e2743c0a49d476680be8417 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/426b7bd15e2743c0a49d476680be8417 2024-11-21T00:28:06,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/b0ebedb5b2f041c488985ed093313919 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/b0ebedb5b2f041c488985ed093313919 2024-11-21T00:28:06,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4212bd5e4c9d4b8b98a39c17dd5b43de to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/4212bd5e4c9d4b8b98a39c17dd5b43de 2024-11-21T00:28:06,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ce5c79421c334e6c86e1038ebbf69962 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/ce5c79421c334e6c86e1038ebbf69962 2024-11-21T00:28:06,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/a77764b0073f454cbc9ab26e64172d41 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/a77764b0073f454cbc9ab26e64172d41 2024-11-21T00:28:06,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/01afbfcbe8af47de964dfa65fecb72ba to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/01afbfcbe8af47de964dfa65fecb72ba 2024-11-21T00:28:06,289 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/da40b3f40ab942b19e76d09c2fb07eb1 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/da40b3f40ab942b19e76d09c2fb07eb1 2024-11-21T00:28:06,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/d3b6dbd5d4644bba9dab5aa6c546645d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/d3b6dbd5d4644bba9dab5aa6c546645d 2024-11-21T00:28:06,292 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/e5c0ee8ab720443385e1b92fb0427c44 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/e5c0ee8ab720443385e1b92fb0427c44 2024-11-21T00:28:06,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/2ecbd1f205bc4fc389d54b730941c090 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/2ecbd1f205bc4fc389d54b730941c090 2024-11-21T00:28:06,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5842291152af4977bb66668ff2c0b37f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5842291152af4977bb66668ff2c0b37f 2024-11-21T00:28:06,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/396ff94f7a5042cc8f2bbd60b2667a20 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/396ff94f7a5042cc8f2bbd60b2667a20 2024-11-21T00:28:06,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5de301cc35c446c68e877d627367ef6f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/5de301cc35c446c68e877d627367ef6f 2024-11-21T00:28:06,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3c2fe42dfa3c443c9be5141bbd579574 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3c2fe42dfa3c443c9be5141bbd579574 2024-11-21T00:28:06,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/536638babbf44736a52858b6717232d5 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/536638babbf44736a52858b6717232d5 2024-11-21T00:28:06,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3fa51b784062440ab4965dd0607f6ebe to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/3fa51b784062440ab4965dd0607f6ebe 2024-11-21T00:28:06,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/321fcf2da1ad4843a1522d85c29b0e5a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/321fcf2da1ad4843a1522d85c29b0e5a 2024-11-21T00:28:06,319 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/recovered.edits/385.seqid, newMaxSeqId=385, maxSeqId=4 2024-11-21T00:28:06,320 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59. 
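The HFileArchiver entries above record per-store-file moves from the region's data directory (data/default/TestAcidGuarantees/...) to the mirrored path under archive/ while the store is closed. As a rough illustration only, not the actual HFileArchiver implementation, each move amounts to ensuring the archive directory exists and renaming the file within HDFS; the class and method names below are invented for the sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveSketch {
      // Simplified sketch: move one store file into the parallel archive layout.
      // The real HFileArchiver also handles name collisions, retries and length
      // checks; this does not.
      static void archiveStoreFile(Configuration conf, Path dataFile, Path archiveDir) throws Exception {
        FileSystem fs = FileSystem.get(conf);
        fs.mkdirs(archiveDir);                      // ensure .../archive/data/<ns>/<table>/<region>/<family> exists
        Path target = new Path(archiveDir, dataFile.getName());
        if (!fs.rename(dataFile, target)) {         // the HDFS rename behind each "Archived from ... to ..." entry
          throw new java.io.IOException("Failed to archive " + dataFile + " to " + target);
        }
      }
    }
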
2024-11-21T00:28:06,320 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1635): Region close journal for 542dd37a0a64b62316f21779fd913b59: 2024-11-21T00:28:06,322 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(170): Closed 542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,322 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=542dd37a0a64b62316f21779fd913b59, regionState=CLOSED 2024-11-21T00:28:06,325 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-21T00:28:06,325 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseRegionProcedure 542dd37a0a64b62316f21779fd913b59, server=0e7930017ff8,37961,1732148819586 in 1.7700 sec 2024-11-21T00:28:06,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-11-21T00:28:06,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=542dd37a0a64b62316f21779fd913b59, UNASSIGN in 1.7750 sec 2024-11-21T00:28:06,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-21T00:28:06,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.7790 sec 2024-11-21T00:28:06,331 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148886331"}]},"ts":"1732148886331"} 2024-11-21T00:28:06,332 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-21T00:28:06,373 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-21T00:28:06,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8410 sec 2024-11-21T00:28:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-21T00:28:06,644 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-21T00:28:06,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-21T00:28:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:06,647 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:06,647 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=63, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:06,648 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-21T00:28:06,651 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,654 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/recovered.edits] 2024-11-21T00:28:06,658 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/0cce5e2c5d154f10be7f61a850ab7199 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/0cce5e2c5d154f10be7f61a850ab7199 2024-11-21T00:28:06,660 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/32fcfe08a9be433c84af65dae176cf95 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/32fcfe08a9be433c84af65dae176cf95 2024-11-21T00:28:06,661 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9ab2e07f693d4411be85a330172b8e82 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/9ab2e07f693d4411be85a330172b8e82 2024-11-21T00:28:06,663 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/c71c4e5201a64e47a375559049ccdf2e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/A/c71c4e5201a64e47a375559049ccdf2e 2024-11-21T00:28:06,666 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/295e87847b0f4b6f8a57fbadb55eb0df to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/295e87847b0f4b6f8a57fbadb55eb0df 
2024-11-21T00:28:06,668 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/663b196353174d50b4f46f9dd1dd8959 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/663b196353174d50b4f46f9dd1dd8959 2024-11-21T00:28:06,670 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/9b00040defbf478a83d2d774327cedff to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/9b00040defbf478a83d2d774327cedff 2024-11-21T00:28:06,672 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/c06c911f410e4aa29d607ac78443355c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/B/c06c911f410e4aa29d607ac78443355c 2024-11-21T00:28:06,675 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/037594ef6d99426c98362f82cc2c94cd to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/037594ef6d99426c98362f82cc2c94cd 2024-11-21T00:28:06,677 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/6ce6e9fd28244b7c9e2d5920bbfad7e4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/6ce6e9fd28244b7c9e2d5920bbfad7e4 2024-11-21T00:28:06,679 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/a251f683ca864955a6b911ff1ce42aa4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/a251f683ca864955a6b911ff1ce42aa4 2024-11-21T00:28:06,680 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/b9bbc9664e4048a09f16c7897a7f671c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/C/b9bbc9664e4048a09f16c7897a7f671c 2024-11-21T00:28:06,684 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/recovered.edits/385.seqid to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59/recovered.edits/385.seqid 2024-11-21T00:28:06,684 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,684 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-21T00:28:06,685 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-21T00:28:06,686 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-21T00:28:06,690 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112111a251505cae418e92fc98818cb5183e_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112111a251505cae418e92fc98818cb5183e_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,691 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121179c9f731af94521ae6d53b959f93774_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121179c9f731af94521ae6d53b959f93774_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,693 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411211a0705256d30436f8bf40f927b33ef83_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411211a0705256d30436f8bf40f927b33ef83_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,694 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112122be52ca0093413a9f68549e7a7e018f_542dd37a0a64b62316f21779fd913b59 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112122be52ca0093413a9f68549e7a7e018f_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,695 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112123c4b572b4c5475eb783e28168f80c11_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112123c4b572b4c5475eb783e28168f80c11_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,697 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112127dc479d98294b38b807cb08fca9bc49_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112127dc479d98294b38b807cb08fca9bc49_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,699 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112141e7ecce3ec94269bf0769350f135db1_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112141e7ecce3ec94269bf0769350f135db1_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,701 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121489189f1beeb4d38859a280c4246217f_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121489189f1beeb4d38859a280c4246217f_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,702 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411214f3ae2cba6c64a468d9e77646b068d2e_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411214f3ae2cba6c64a468d9e77646b068d2e_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,704 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411217bf3c6e6808d44e0b601d01a3d783e09_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411217bf3c6e6808d44e0b601d01a3d783e09_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,708 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411217cb732c89e7a4e22bd938b30fc4f230d_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411217cb732c89e7a4e22bd938b30fc4f230d_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,710 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411218bcab1e72c654e46a3ea808107da56cb_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411218bcab1e72c654e46a3ea808107da56cb_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,711 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b664345daab04d82b7a2d5fb8995ba6b_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b664345daab04d82b7a2d5fb8995ba6b_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,713 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ca30f32c187e468fb793a8ea426a7c0e_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ca30f32c187e468fb793a8ea426a7c0e_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,715 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d5fb034239964197ba44467164825c56_542dd37a0a64b62316f21779fd913b59 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d5fb034239964197ba44467164825c56_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,716 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ec3ef1557d254475b19b59e6a137fef6_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ec3ef1557d254475b19b59e6a137fef6_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,718 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f1b8b0f840aa430593bdcaffed2970d5_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f1b8b0f840aa430593bdcaffed2970d5_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,719 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f34e8edd2a4a4ba8980819435b4395ff_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f34e8edd2a4a4ba8980819435b4395ff_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,721 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f3a80ef083974be08edb9a7a2ca50ff5_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f3a80ef083974be08edb9a7a2ca50ff5_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,722 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121fd9fa88565cf4a75a4fed6bf6feb309d_542dd37a0a64b62316f21779fd913b59 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121fd9fa88565cf4a75a4fed6bf6feb309d_542dd37a0a64b62316f21779fd913b59 2024-11-21T00:28:06,723 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-21T00:28:06,727 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=63, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:06,737 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-21T00:28:06,739 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-21T00:28:06,741 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=63, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:06,741 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-21T00:28:06,741 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732148886741"}]},"ts":"9223372036854775807"} 2024-11-21T00:28:06,746 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-21T00:28:06,746 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 542dd37a0a64b62316f21779fd913b59, NAME => 'TestAcidGuarantees,,1732148858323.542dd37a0a64b62316f21779fd913b59.', STARTKEY => '', ENDKEY => ''}] 2024-11-21T00:28:06,746 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
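The entries above trace the master-side DeleteTableProcedure (pid=63) through its states: archiving the region's files, removing the region row from hbase:meta, dropping the table descriptor and region states, and finally marking the table as deleted. For orientation only, a delete like this is normally driven from the client through the Admin API; the sketch below is a hypothetical stand-alone example (the class name, configuration handling and control flow are mine, not taken from the test source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("TestAcidGuarantees");
                if (admin.tableExists(tn)) {
                    admin.disableTable(tn); // regions must be unassigned before a delete
                    admin.deleteTable(tn);  // the master runs this as a DeleteTableProcedure
                }                           // and the client polls until it completes
            }
        }
    }

The repeated "Checking to see if procedure is done pid=63" entries that follow are exactly that client-side polling until the procedure reports SUCCESS.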
2024-11-21T00:28:06,747 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732148886746"}]},"ts":"9223372036854775807"} 2024-11-21T00:28:06,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-21T00:28:06,750 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-21T00:28:06,799 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=63, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:06,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 156 msec 2024-11-21T00:28:06,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-21T00:28:06,950 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-21T00:28:06,960 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=241 (was 238) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1912749785_22 at /127.0.0.1:55638 [Waiting for operation #161] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x14e842c7-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_407452427_22 at /127.0.0.1:39864 [Waiting for operation #412] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x14e842c7-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1912749785_22 at /127.0.0.1:43412 [Waiting for operation #397] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_407452427_22 at /127.0.0.1:55626 [Waiting for operation #158] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x14e842c7-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x14e842c7-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=459 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=788 (was 596) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3008 (was 3227) 2024-11-21T00:28:06,969 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=241, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=788, ProcessCount=11, AvailableMemoryMB=3008 2024-11-21T00:28:06,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-21T00:28:06,970 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:06,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:06,973 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:06,973 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:06,973 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 64 2024-11-21T00:28:06,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-21T00:28:06,974 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:06,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742055_1231 (size=963) 2024-11-21T00:28:06,989 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => 
'', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f 2024-11-21T00:28:07,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742056_1232 (size=53) 2024-11-21T00:28:07,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-21T00:28:07,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-21T00:28:07,414 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:07,414 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 1847e2b1a8a082929629d13ff179eb01, disabling compactions & flushes 2024-11-21T00:28:07,414 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:07,414 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:07,414 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. after waiting 0 ms 2024-11-21T00:28:07,414 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:07,414 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
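The create request logged at 00:28:06,970 spells out the full table descriptor: the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three identical column families A, B and C with a single version and 64 KB blocks. As a rough illustration only (not the test's own code), an equivalent descriptor can be assembled with the public client API; anything not shown in the log is simply left at its default here:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AcidTableDescriptorSketch {
        // Rebuilds, approximately, the descriptor printed in the log above.
        static TableDescriptor build() {
            TableDescriptorBuilder builder = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // table-level attribute from the log: ADAPTIVE in-memory compaction
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] { "A", "B", "C" }) {
                builder.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)    // VERSIONS => '1'
                    .setBlocksize(65536)  // BLOCKSIZE => '65536 B (64KB)'
                    .build());
            }
            return builder.build();
        }
    }

Passing such a descriptor to Admin.createTable() is what yields a CreateTableProcedure like pid=64 above, followed by the CREATE_TABLE_WRITE_FS_LAYOUT and region-initialization steps seen in this trace.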
2024-11-21T00:28:07,414 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:07,415 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:07,415 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732148887415"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148887415"}]},"ts":"1732148887415"} 2024-11-21T00:28:07,416 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-21T00:28:07,416 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:07,417 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148887417"}]},"ts":"1732148887417"} 2024-11-21T00:28:07,417 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-21T00:28:07,436 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1847e2b1a8a082929629d13ff179eb01, ASSIGN}] 2024-11-21T00:28:07,437 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1847e2b1a8a082929629d13ff179eb01, ASSIGN 2024-11-21T00:28:07,438 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1847e2b1a8a082929629d13ff179eb01, ASSIGN; state=OFFLINE, location=0e7930017ff8,37961,1732148819586; forceNewPlan=false, retain=false 2024-11-21T00:28:07,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-21T00:28:07,588 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=1847e2b1a8a082929629d13ff179eb01, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:07,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure 1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:28:07,741 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:07,744 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:07,744 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:07,745 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:07,745 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:07,745 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:07,745 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:07,746 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:07,747 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:28:07,748 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1847e2b1a8a082929629d13ff179eb01 columnFamilyName A 2024-11-21T00:28:07,748 DEBUG [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:07,748 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] regionserver.HStore(327): Store=1847e2b1a8a082929629d13ff179eb01/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:07,748 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:07,749 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:28:07,749 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1847e2b1a8a082929629d13ff179eb01 columnFamilyName B 2024-11-21T00:28:07,749 DEBUG [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:07,750 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] regionserver.HStore(327): Store=1847e2b1a8a082929629d13ff179eb01/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:07,750 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:07,752 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:28:07,752 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1847e2b1a8a082929629d13ff179eb01 columnFamilyName C 2024-11-21T00:28:07,752 DEBUG [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:07,752 INFO [StoreOpener-1847e2b1a8a082929629d13ff179eb01-1 {}] regionserver.HStore(327): Store=1847e2b1a8a082929629d13ff179eb01/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:07,752 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:07,753 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:07,753 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:07,754 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T00:28:07,755 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:07,757 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:07,758 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 1847e2b1a8a082929629d13ff179eb01; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71004240, jitterRate=0.05804562568664551}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:28:07,758 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:07,759 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., pid=66, masterSystemTime=1732148887741 2024-11-21T00:28:07,760 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:07,760 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
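At this point the region is open with one ADAPTIVE CompactingMemStore per family, and the later entries (the flush of 3/3 column families and the RegionTooBusyException stack traces) are reactions to client writes landing in those memstores. Purely as a hypothetical illustration of the kind of mutation involved (the log mentions key test_row_0 and qualifier col10), a single multi-family Put could look like the sketch below; the method name and value bytes are mine, not taken from the test:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteSketch {
        // Hypothetical writer: one Put that touches families A, B and C of the
        // same row, the shape of write the memstores above then have to absorb.
        static void writeRow(Connection conn, int i) throws IOException {
            try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_" + i));
                for (String family : new String[] { "A", "B", "C" }) {
                    put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"),
                        Bytes.toBytes("value-" + i));
                }
                table.put(put); // can be rejected with RegionTooBusyException when the
            }                   // region memstore is over its blocking limit, as below
        }
    }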
2024-11-21T00:28:07,760 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=1847e2b1a8a082929629d13ff179eb01, regionState=OPEN, openSeqNum=2, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:07,762 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-21T00:28:07,762 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure 1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 in 172 msec 2024-11-21T00:28:07,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-21T00:28:07,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1847e2b1a8a082929629d13ff179eb01, ASSIGN in 326 msec 2024-11-21T00:28:07,764 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:07,764 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148887764"}]},"ts":"1732148887764"} 2024-11-21T00:28:07,765 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-21T00:28:07,807 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:07,808 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 837 msec 2024-11-21T00:28:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-21T00:28:08,077 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 64 completed 2024-11-21T00:28:08,080 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3eec6530 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7792c763 2024-11-21T00:28:08,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a568ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,104 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,106 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,111 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:08,112 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34062, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:08,115 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dc273c3 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@c8a18c7 2024-11-21T00:28:08,256 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e0e280, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,258 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x195206da to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45426917 2024-11-21T00:28:08,338 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@473477dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,340 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x282318cf to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e7fc60d 2024-11-21T00:28:08,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a91dc80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,376 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ea91426 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e66ea50 2024-11-21T00:28:08,407 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a874cc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,409 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x762de37e to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f50b381 2024-11-21T00:28:08,431 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f6119e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,434 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x124edab0 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7507573f 2024-11-21T00:28:08,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78439bc6, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,448 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x712d7bc3 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3e5c7476 2024-11-21T00:28:08,457 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a2545d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,458 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40da73c1 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df84068 2024-11-21T00:28:08,474 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d039dc2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,475 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3be398a9 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644774bd 2024-11-21T00:28:08,487 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15db087a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,488 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ff3c1a9 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60cea876 2024-11-21T00:28:08,507 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1be4cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:08,516 DEBUG [hconnection-0x786575f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,518 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,535 DEBUG [hconnection-0x6a043d24-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,536 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43204, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,551 DEBUG [hconnection-0x3875d84e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,551 DEBUG 
[hconnection-0x25cfb477-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,552 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:08,553 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-11-21T00:28:08,554 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:08,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-21T00:28:08,555 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:08,555 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:08,562 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,571 DEBUG [hconnection-0xc377428-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,573 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43236, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,576 DEBUG [hconnection-0x31fbbce4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,582 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:08,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:28:08,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:08,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:08,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:08,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:08,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:08,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:08,619 DEBUG [hconnection-0x18432d96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,621 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43262, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,639 DEBUG [hconnection-0x4fade4a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,641 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,647 DEBUG [hconnection-0x4f350d04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,648 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43268, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-21T00:28:08,667 DEBUG [hconnection-0x84b5fec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:08,668 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43274, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:08,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/4bd54ee0ab0a493bace7e46295ddfe82 is 50, key is test_row_0/A:col10/1732148888595/Put/seqid=0 2024-11-21T00:28:08,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:08,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148948678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:08,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148948679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:08,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148948681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:08,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148948682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:08,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148948686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,707 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-21T00:28:08,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:08,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:08,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:08,708 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:08,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:08,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:08,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742057_1233 (size=14341) 2024-11-21T00:28:08,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/4bd54ee0ab0a493bace7e46295ddfe82 2024-11-21T00:28:08,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/349a182e7d3448d4a74a1976e88f3978 is 50, key is test_row_0/B:col10/1732148888595/Put/seqid=0 2024-11-21T00:28:08,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148948794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:08,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148948796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148948797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148948803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148948808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742058_1234 (size=12001) 2024-11-21T00:28:08,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/349a182e7d3448d4a74a1976e88f3978 2024-11-21T00:28:08,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-21T00:28:08,861 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:08,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-21T00:28:08,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:08,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:08,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:08,862 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:08,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:08,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:08,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/eec87108e7e44b0c887d74154e92462f is 50, key is test_row_0/C:col10/1732148888595/Put/seqid=0 2024-11-21T00:28:08,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742059_1235 (size=12001) 2024-11-21T00:28:08,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/eec87108e7e44b0c887d74154e92462f 2024-11-21T00:28:08,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/4bd54ee0ab0a493bace7e46295ddfe82 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4bd54ee0ab0a493bace7e46295ddfe82 2024-11-21T00:28:08,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4bd54ee0ab0a493bace7e46295ddfe82, entries=200, sequenceid=13, filesize=14.0 K 2024-11-21T00:28:08,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/349a182e7d3448d4a74a1976e88f3978 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/349a182e7d3448d4a74a1976e88f3978 2024-11-21T00:28:08,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/349a182e7d3448d4a74a1976e88f3978, entries=150, sequenceid=13, filesize=11.7 K 2024-11-21T00:28:08,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/eec87108e7e44b0c887d74154e92462f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eec87108e7e44b0c887d74154e92462f 2024-11-21T00:28:09,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eec87108e7e44b0c887d74154e92462f, entries=150, sequenceid=13, filesize=11.7 K 2024-11-21T00:28:09,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1847e2b1a8a082929629d13ff179eb01 in 400ms, sequenceid=13, compaction requested=false 2024-11-21T00:28:09,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:09,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:09,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:28:09,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:09,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:09,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:09,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:09,014 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:09,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:09,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-21T00:28:09,014 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:09,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:09,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:09,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:09,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148949025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148949025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/8db393263b2d46928a7a09c3741170ab is 50, key is test_row_0/A:col10/1732148889011/Put/seqid=0 2024-11-21T00:28:09,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148949031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148949033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148949033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742060_1236 (size=12001) 2024-11-21T00:28:09,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148949134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148949134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148949141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148949141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148949142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-21T00:28:09,170 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-21T00:28:09,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:09,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:09,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:09,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:09,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,326 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-21T00:28:09,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:09,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:09,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:09,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148949337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148949338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148949347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148949347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148949347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/8db393263b2d46928a7a09c3741170ab 2024-11-21T00:28:09,490 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-21T00:28:09,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:09,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:09,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:09,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/7200f446d67d48a4a6d00bc78653a85e is 50, key is test_row_0/B:col10/1732148889011/Put/seqid=0 2024-11-21T00:28:09,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742061_1237 (size=12001) 2024-11-21T00:28:09,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/7200f446d67d48a4a6d00bc78653a85e 2024-11-21T00:28:09,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/7010f11256644e15a9dd9c9d92f97f2f is 50, key is test_row_0/C:col10/1732148889011/Put/seqid=0 2024-11-21T00:28:09,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742062_1238 (size=12001) 2024-11-21T00:28:09,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/7010f11256644e15a9dd9c9d92f97f2f 2024-11-21T00:28:09,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/8db393263b2d46928a7a09c3741170ab as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/8db393263b2d46928a7a09c3741170ab 2024-11-21T00:28:09,646 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-21T00:28:09,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:09,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:09,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:09,651 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:09,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148949655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/8db393263b2d46928a7a09c3741170ab, entries=150, sequenceid=40, filesize=11.7 K 2024-11-21T00:28:09,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148949655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148949653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:09,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/7200f446d67d48a4a6d00bc78653a85e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7200f446d67d48a4a6d00bc78653a85e 2024-11-21T00:28:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-21T00:28:09,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148949661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148949662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7200f446d67d48a4a6d00bc78653a85e, entries=150, sequenceid=40, filesize=11.7 K 2024-11-21T00:28:09,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/7010f11256644e15a9dd9c9d92f97f2f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7010f11256644e15a9dd9c9d92f97f2f 2024-11-21T00:28:09,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7010f11256644e15a9dd9c9d92f97f2f, entries=150, sequenceid=40, filesize=11.7 K 2024-11-21T00:28:09,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 1847e2b1a8a082929629d13ff179eb01 in 726ms, sequenceid=40, compaction requested=false 2024-11-21T00:28:09,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:09,819 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:09,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-21T00:28:09,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:09,819 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-21T00:28:09,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:09,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:09,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:09,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:09,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:09,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:09,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/6852d85acb184cd49f875b4cbeaee98a is 50, key is test_row_0/A:col10/1732148889029/Put/seqid=0 2024-11-21T00:28:09,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742063_1239 (size=9657) 2024-11-21T00:28:09,872 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/6852d85acb184cd49f875b4cbeaee98a 2024-11-21T00:28:09,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/d519b374db964e63925b8079499ef94d is 50, key is test_row_0/B:col10/1732148889029/Put/seqid=0 2024-11-21T00:28:09,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742064_1240 (size=9657) 2024-11-21T00:28:09,922 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=49 (bloomFilter=true), 
to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/d519b374db964e63925b8079499ef94d 2024-11-21T00:28:09,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/db2f092d5ae74d69bade7cac0aec4b13 is 50, key is test_row_0/C:col10/1732148889029/Put/seqid=0 2024-11-21T00:28:09,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742065_1241 (size=9657) 2024-11-21T00:28:09,985 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/db2f092d5ae74d69bade7cac0aec4b13 2024-11-21T00:28:09,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/6852d85acb184cd49f875b4cbeaee98a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6852d85acb184cd49f875b4cbeaee98a 2024-11-21T00:28:10,001 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6852d85acb184cd49f875b4cbeaee98a, entries=100, sequenceid=49, filesize=9.4 K 2024-11-21T00:28:10,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/d519b374db964e63925b8079499ef94d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d519b374db964e63925b8079499ef94d 2024-11-21T00:28:10,023 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d519b374db964e63925b8079499ef94d, entries=100, sequenceid=49, filesize=9.4 K 2024-11-21T00:28:10,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/db2f092d5ae74d69bade7cac0aec4b13 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/db2f092d5ae74d69bade7cac0aec4b13 2024-11-21T00:28:10,031 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/db2f092d5ae74d69bade7cac0aec4b13, entries=100, sequenceid=49, filesize=9.4 K 2024-11-21T00:28:10,032 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 1847e2b1a8a082929629d13ff179eb01 in 213ms, sequenceid=49, compaction requested=true 2024-11-21T00:28:10,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:10,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:10,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-21T00:28:10,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-11-21T00:28:10,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-21T00:28:10,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4780 sec 2024-11-21T00:28:10,036 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 1.4830 sec 2024-11-21T00:28:10,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:10,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:28:10,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:10,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:10,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:10,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:10,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:10,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:10,185 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/12779cccbbab419d9132f8ad1ff11b90 is 50, key is test_row_0/A:col10/1732148890174/Put/seqid=0 2024-11-21T00:28:10,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742066_1242 (size=12001) 2024-11-21T00:28:10,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=61 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/12779cccbbab419d9132f8ad1ff11b90 2024-11-21T00:28:10,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148950210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148950212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148950212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148950214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148950214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/0e248519d57a4ec0941405d742c34f08 is 50, key is test_row_0/B:col10/1732148890174/Put/seqid=0 2024-11-21T00:28:10,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742067_1243 (size=12001) 2024-11-21T00:28:10,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=61 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/0e248519d57a4ec0941405d742c34f08 2024-11-21T00:28:10,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148950318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/6d89f9c6cf3944dc8e12fe9349ffa414 is 50, key is test_row_0/C:col10/1732148890174/Put/seqid=0 2024-11-21T00:28:10,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148950320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148950321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148950323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148950323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742068_1244 (size=12001) 2024-11-21T00:28:10,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=61 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/6d89f9c6cf3944dc8e12fe9349ffa414 2024-11-21T00:28:10,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/12779cccbbab419d9132f8ad1ff11b90 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/12779cccbbab419d9132f8ad1ff11b90 2024-11-21T00:28:10,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/12779cccbbab419d9132f8ad1ff11b90, entries=150, sequenceid=61, filesize=11.7 K 2024-11-21T00:28:10,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/0e248519d57a4ec0941405d742c34f08 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0e248519d57a4ec0941405d742c34f08 2024-11-21T00:28:10,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0e248519d57a4ec0941405d742c34f08, entries=150, sequenceid=61, filesize=11.7 K 2024-11-21T00:28:10,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/6d89f9c6cf3944dc8e12fe9349ffa414 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6d89f9c6cf3944dc8e12fe9349ffa414 2024-11-21T00:28:10,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6d89f9c6cf3944dc8e12fe9349ffa414, entries=150, sequenceid=61, filesize=11.7 K 2024-11-21T00:28:10,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1847e2b1a8a082929629d13ff179eb01 in 263ms, sequenceid=61, compaction requested=true 2024-11-21T00:28:10,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:10,438 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:10,440 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48000 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:10,440 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:10,441 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:10,441 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4bd54ee0ab0a493bace7e46295ddfe82, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/8db393263b2d46928a7a09c3741170ab, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6852d85acb184cd49f875b4cbeaee98a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/12779cccbbab419d9132f8ad1ff11b90] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=46.9 K 2024-11-21T00:28:10,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:10,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:10,441 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:10,441 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4bd54ee0ab0a493bace7e46295ddfe82, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732148888570 2024-11-21T00:28:10,442 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8db393263b2d46928a7a09c3741170ab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732148888677 2024-11-21T00:28:10,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:10,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:10,442 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6852d85acb184cd49f875b4cbeaee98a, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732148889028 2024-11-21T00:28:10,443 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12779cccbbab419d9132f8ad1ff11b90, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1732148890170 2024-11-21T00:28:10,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:10,443 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 
45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:10,443 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction (all files) 2024-11-21T00:28:10,443 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:10,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:10,443 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/349a182e7d3448d4a74a1976e88f3978, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7200f446d67d48a4a6d00bc78653a85e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d519b374db964e63925b8079499ef94d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0e248519d57a4ec0941405d742c34f08] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=44.6 K 2024-11-21T00:28:10,444 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 349a182e7d3448d4a74a1976e88f3978, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732148888592 2024-11-21T00:28:10,444 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 7200f446d67d48a4a6d00bc78653a85e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732148888677 2024-11-21T00:28:10,445 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting d519b374db964e63925b8079499ef94d, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732148889028 2024-11-21T00:28:10,446 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e248519d57a4ec0941405d742c34f08, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1732148890170 2024-11-21T00:28:10,467 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#198 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:10,468 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/17038da050a54864b70963eee81ce44e is 50, key is test_row_0/A:col10/1732148890174/Put/seqid=0 2024-11-21T00:28:10,471 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#199 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:10,472 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/825eb0efa0284e0598d551f20c1f81e0 is 50, key is test_row_0/B:col10/1732148890174/Put/seqid=0 2024-11-21T00:28:10,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742069_1245 (size=12139) 2024-11-21T00:28:10,525 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/17038da050a54864b70963eee81ce44e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/17038da050a54864b70963eee81ce44e 2024-11-21T00:28:10,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:28:10,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:10,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:10,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:10,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:10,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:10,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:10,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:10,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742070_1246 (size=12139) 2024-11-21T00:28:10,553 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into 17038da050a54864b70963eee81ce44e(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:10,553 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:10,553 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=12, startTime=1732148890438; duration=0sec 2024-11-21T00:28:10,553 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:10,554 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:10,554 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:10,555 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/825eb0efa0284e0598d551f20c1f81e0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/825eb0efa0284e0598d551f20c1f81e0 2024-11-21T00:28:10,557 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:10,557 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:10,558 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:10,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,560 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eec87108e7e44b0c887d74154e92462f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7010f11256644e15a9dd9c9d92f97f2f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/db2f092d5ae74d69bade7cac0aec4b13, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6d89f9c6cf3944dc8e12fe9349ffa414] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=44.6 K 2024-11-21T00:28:10,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148950551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/064f44b9dc0f496488c4a0edee4cd2a1 is 50, key is test_row_0/A:col10/1732148890526/Put/seqid=0 2024-11-21T00:28:10,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148950552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,564 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting eec87108e7e44b0c887d74154e92462f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732148888592 2024-11-21T00:28:10,564 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into 825eb0efa0284e0598d551f20c1f81e0(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:10,564 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:10,565 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=12, startTime=1732148890441; duration=0sec 2024-11-21T00:28:10,565 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:10,565 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:10,565 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7010f11256644e15a9dd9c9d92f97f2f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732148888677 2024-11-21T00:28:10,566 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting db2f092d5ae74d69bade7cac0aec4b13, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732148889028 2024-11-21T00:28:10,567 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d89f9c6cf3944dc8e12fe9349ffa414, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1732148890170 2024-11-21T00:28:10,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148950560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148950563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148950564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742071_1247 (size=12001) 2024-11-21T00:28:10,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/064f44b9dc0f496488c4a0edee4cd2a1 2024-11-21T00:28:10,605 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#201 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:10,606 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/500a7e520d4c4eaf9ceec242ee0a312c is 50, key is test_row_0/C:col10/1732148890174/Put/seqid=0 2024-11-21T00:28:10,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/eed015ad27884773a4cdf29d896aa0e6 is 50, key is test_row_0/B:col10/1732148890526/Put/seqid=0 2024-11-21T00:28:10,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742072_1248 (size=12139) 2024-11-21T00:28:10,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148950661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148950661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-21T00:28:10,667 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-21T00:28:10,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742073_1249 (size=12001) 2024-11-21T00:28:10,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:10,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148950674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-21T00:28:10,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148950674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,678 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:10,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148950676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,678 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:10,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:10,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-21T00:28:10,680 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/500a7e520d4c4eaf9ceec242ee0a312c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/500a7e520d4c4eaf9ceec242ee0a312c 2024-11-21T00:28:10,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/eed015ad27884773a4cdf29d896aa0e6 2024-11-21T00:28:10,709 INFO 
[RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into 500a7e520d4c4eaf9ceec242ee0a312c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:10,709 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:10,709 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=12, startTime=1732148890442; duration=0sec 2024-11-21T00:28:10,709 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:10,709 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:10,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/f901ec666acf4cfc9eb6c18504c3c00e is 50, key is test_row_0/C:col10/1732148890526/Put/seqid=0 2024-11-21T00:28:10,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742074_1250 (size=12001) 2024-11-21T00:28:10,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/f901ec666acf4cfc9eb6c18504c3c00e 2024-11-21T00:28:10,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/064f44b9dc0f496488c4a0edee4cd2a1 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/064f44b9dc0f496488c4a0edee4cd2a1 2024-11-21T00:28:10,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-21T00:28:10,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/064f44b9dc0f496488c4a0edee4cd2a1, entries=150, sequenceid=87, filesize=11.7 K 2024-11-21T00:28:10,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/eed015ad27884773a4cdf29d896aa0e6 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/eed015ad27884773a4cdf29d896aa0e6 2024-11-21T00:28:10,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/eed015ad27884773a4cdf29d896aa0e6, entries=150, sequenceid=87, filesize=11.7 K 2024-11-21T00:28:10,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/f901ec666acf4cfc9eb6c18504c3c00e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f901ec666acf4cfc9eb6c18504c3c00e 2024-11-21T00:28:10,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f901ec666acf4cfc9eb6c18504c3c00e, entries=150, sequenceid=87, filesize=11.7 K 2024-11-21T00:28:10,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 1847e2b1a8a082929629d13ff179eb01 in 287ms, sequenceid=87, compaction requested=false 2024-11-21T00:28:10,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:10,830 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-21T00:28:10,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:10,832 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:28:10,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:10,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:10,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:10,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:10,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:10,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:10,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/4b7c031355a143858579bc77e9b38de5 is 50, key is test_row_0/A:col10/1732148890558/Put/seqid=0 2024-11-21T00:28:10,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:10,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:10,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742075_1251 (size=12001) 2024-11-21T00:28:10,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148950916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148950919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148950921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148950924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:10,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148950925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:10,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-21T00:28:11,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148951025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148951028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148951030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148951030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148951032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148951232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148951235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148951236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148951239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148951228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,280 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/4b7c031355a143858579bc77e9b38de5 2024-11-21T00:28:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-21T00:28:11,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/935b5cdce7fd4717b5619fbe366d9871 is 50, key is test_row_0/B:col10/1732148890558/Put/seqid=0 2024-11-21T00:28:11,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742076_1252 (size=12001) 2024-11-21T00:28:11,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148951541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148951543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148951545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148951547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:11,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148951550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:11,751 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/935b5cdce7fd4717b5619fbe366d9871 2024-11-21T00:28:11,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-21T00:28:11,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/07197113ab724aecb58c91bf690aa2bf is 50, key is test_row_0/C:col10/1732148890558/Put/seqid=0 2024-11-21T00:28:11,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742077_1253 (size=12001) 2024-11-21T00:28:11,828 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/07197113ab724aecb58c91bf690aa2bf 2024-11-21T00:28:11,855 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-21T00:28:11,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/4b7c031355a143858579bc77e9b38de5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4b7c031355a143858579bc77e9b38de5 2024-11-21T00:28:11,877 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4b7c031355a143858579bc77e9b38de5, entries=150, sequenceid=102, filesize=11.7 K 2024-11-21T00:28:11,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/935b5cdce7fd4717b5619fbe366d9871 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/935b5cdce7fd4717b5619fbe366d9871 2024-11-21T00:28:11,886 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/935b5cdce7fd4717b5619fbe366d9871, entries=150, sequenceid=102, filesize=11.7 K 2024-11-21T00:28:11,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/07197113ab724aecb58c91bf690aa2bf as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/07197113ab724aecb58c91bf690aa2bf 2024-11-21T00:28:11,893 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/07197113ab724aecb58c91bf690aa2bf, entries=150, sequenceid=102, filesize=11.7 K 2024-11-21T00:28:11,894 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 1847e2b1a8a082929629d13ff179eb01 in 1063ms, sequenceid=102, compaction requested=true 2024-11-21T00:28:11,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:11,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:11,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-21T00:28:11,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-21T00:28:11,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-21T00:28:11,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2240 sec 2024-11-21T00:28:11,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.2300 sec 2024-11-21T00:28:12,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-21T00:28:12,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:12,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:12,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:12,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:12,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:12,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:12,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:12,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/d0a90098ddef4c5dbf4085b469d9fe00 is 50, key is test_row_0/A:col10/1732148890923/Put/seqid=0 2024-11-21T00:28:12,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148952065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148952065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148952065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148952071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148952072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742078_1254 (size=14341) 2024-11-21T00:28:12,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/d0a90098ddef4c5dbf4085b469d9fe00 2024-11-21T00:28:12,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/5e2c68e1ec67412ca246e5771ef92eb9 is 50, key is test_row_0/B:col10/1732148890923/Put/seqid=0 2024-11-21T00:28:12,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148952173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742079_1255 (size=12001) 2024-11-21T00:28:12,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148952173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148952178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148952179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/5e2c68e1ec67412ca246e5771ef92eb9 2024-11-21T00:28:12,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/f884156eaa2f4c03a054597c88a2f4ed is 50, key is test_row_0/C:col10/1732148890923/Put/seqid=0 2024-11-21T00:28:12,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742080_1256 (size=12001) 2024-11-21T00:28:12,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148952386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148952387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148952387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148952388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148952691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148952694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148952694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:12,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148952695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/f884156eaa2f4c03a054597c88a2f4ed 2024-11-21T00:28:12,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/d0a90098ddef4c5dbf4085b469d9fe00 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d0a90098ddef4c5dbf4085b469d9fe00 2024-11-21T00:28:12,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d0a90098ddef4c5dbf4085b469d9fe00, entries=200, sequenceid=128, filesize=14.0 K 2024-11-21T00:28:12,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/5e2c68e1ec67412ca246e5771ef92eb9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5e2c68e1ec67412ca246e5771ef92eb9 2024-11-21T00:28:12,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5e2c68e1ec67412ca246e5771ef92eb9, entries=150, sequenceid=128, filesize=11.7 K 2024-11-21T00:28:12,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/f884156eaa2f4c03a054597c88a2f4ed as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f884156eaa2f4c03a054597c88a2f4ed 2024-11-21T00:28:12,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f884156eaa2f4c03a054597c88a2f4ed, entries=150, sequenceid=128, filesize=11.7 K 2024-11-21T00:28:12,772 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 1847e2b1a8a082929629d13ff179eb01 in 725ms, sequenceid=128, compaction requested=true 2024-11-21T00:28:12,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:12,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:12,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:12,773 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:12,773 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:12,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:12,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:12,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:12,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:12,775 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48142 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:12,775 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction (all files) 2024-11-21T00:28:12,776 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:12,776 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/825eb0efa0284e0598d551f20c1f81e0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/eed015ad27884773a4cdf29d896aa0e6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/935b5cdce7fd4717b5619fbe366d9871, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5e2c68e1ec67412ca246e5771ef92eb9] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=47.0 K 2024-11-21T00:28:12,776 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50482 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:12,776 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:12,776 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:12,776 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/17038da050a54864b70963eee81ce44e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/064f44b9dc0f496488c4a0edee4cd2a1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4b7c031355a143858579bc77e9b38de5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d0a90098ddef4c5dbf4085b469d9fe00] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=49.3 K 2024-11-21T00:28:12,776 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 825eb0efa0284e0598d551f20c1f81e0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1732148890170 2024-11-21T00:28:12,776 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17038da050a54864b70963eee81ce44e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1732148890170 2024-11-21T00:28:12,776 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting eed015ad27884773a4cdf29d896aa0e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, 
earliestPutTs=1732148890210 2024-11-21T00:28:12,777 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 064f44b9dc0f496488c4a0edee4cd2a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732148890210 2024-11-21T00:28:12,777 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 935b5cdce7fd4717b5619fbe366d9871, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1732148890531 2024-11-21T00:28:12,777 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b7c031355a143858579bc77e9b38de5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1732148890531 2024-11-21T00:28:12,778 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e2c68e1ec67412ca246e5771ef92eb9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732148890917 2024-11-21T00:28:12,778 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0a90098ddef4c5dbf4085b469d9fe00, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732148890902 2024-11-21T00:28:12,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-21T00:28:12,785 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-21T00:28:12,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-21T00:28:12,788 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:12,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-21T00:28:12,789 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:12,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:12,809 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#210 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:12,810 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/87aba94518eb46c5aab5c97c4d6d348d is 50, key is test_row_0/A:col10/1732148890923/Put/seqid=0 2024-11-21T00:28:12,817 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#211 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:12,817 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/89344ae3ad8740859b3e6c6e7f414387 is 50, key is test_row_0/B:col10/1732148890923/Put/seqid=0 2024-11-21T00:28:12,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742082_1258 (size=12275) 2024-11-21T00:28:12,898 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/89344ae3ad8740859b3e6c6e7f414387 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/89344ae3ad8740859b3e6c6e7f414387 2024-11-21T00:28:12,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742081_1257 (size=12275) 2024-11-21T00:28:12,907 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into 89344ae3ad8740859b3e6c6e7f414387(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:12,907 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:12,907 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=12, startTime=1732148892773; duration=0sec 2024-11-21T00:28:12,907 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:12,907 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:12,907 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:12,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-21T00:28:12,910 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48142 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:12,910 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:12,910 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:12,910 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/500a7e520d4c4eaf9ceec242ee0a312c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f901ec666acf4cfc9eb6c18504c3c00e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/07197113ab724aecb58c91bf690aa2bf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f884156eaa2f4c03a054597c88a2f4ed] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=47.0 K 2024-11-21T00:28:12,911 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 500a7e520d4c4eaf9ceec242ee0a312c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1732148890170 2024-11-21T00:28:12,912 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f901ec666acf4cfc9eb6c18504c3c00e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732148890210 2024-11-21T00:28:12,912 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 07197113ab724aecb58c91bf690aa2bf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1732148890531 2024-11-21T00:28:12,913 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f884156eaa2f4c03a054597c88a2f4ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732148890917 2024-11-21T00:28:12,929 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#212 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:12,929 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/d8553174bef546fabd46f59ff410b652 is 50, key is test_row_0/C:col10/1732148890923/Put/seqid=0 2024-11-21T00:28:12,940 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:12,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-21T00:28:12,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:12,941 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:28:12,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:12,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:12,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:12,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:12,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:12,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:12,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/38c7063c11624321af45e71b4df047e9 is 50, key is test_row_0/A:col10/1732148892056/Put/seqid=0 2024-11-21T00:28:12,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742083_1259 (size=12275) 2024-11-21T00:28:12,987 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/d8553174bef546fabd46f59ff410b652 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d8553174bef546fabd46f59ff410b652 2024-11-21T00:28:12,992 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into d8553174bef546fabd46f59ff410b652(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:12,992 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:12,992 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=12, startTime=1732148892773; duration=0sec 2024-11-21T00:28:12,993 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:12,993 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:13,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742084_1260 (size=12151) 2024-11-21T00:28:13,008 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/38c7063c11624321af45e71b4df047e9 2024-11-21T00:28:13,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/c249724ee31a46b7b198c0f7f13022bc is 50, key is test_row_0/B:col10/1732148892056/Put/seqid=0 2024-11-21T00:28:13,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742085_1261 (size=12151) 2024-11-21T00:28:13,049 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/c249724ee31a46b7b198c0f7f13022bc 2024-11-21T00:28:13,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:13,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
as already flushing 2024-11-21T00:28:13,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/341de1af37ac4ba184021a8f36e6f87d is 50, key is test_row_0/C:col10/1732148892056/Put/seqid=0 2024-11-21T00:28:13,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-21T00:28:13,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742086_1262 (size=12151) 2024-11-21T00:28:13,121 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/341de1af37ac4ba184021a8f36e6f87d 2024-11-21T00:28:13,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/38c7063c11624321af45e71b4df047e9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/38c7063c11624321af45e71b4df047e9 2024-11-21T00:28:13,140 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/38c7063c11624321af45e71b4df047e9, entries=150, sequenceid=140, filesize=11.9 K 2024-11-21T00:28:13,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/c249724ee31a46b7b198c0f7f13022bc as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c249724ee31a46b7b198c0f7f13022bc 2024-11-21T00:28:13,147 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c249724ee31a46b7b198c0f7f13022bc, entries=150, sequenceid=140, filesize=11.9 K 2024-11-21T00:28:13,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/341de1af37ac4ba184021a8f36e6f87d as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/341de1af37ac4ba184021a8f36e6f87d 2024-11-21T00:28:13,153 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/341de1af37ac4ba184021a8f36e6f87d, entries=150, sequenceid=140, filesize=11.9 K 2024-11-21T00:28:13,155 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=120.76 KB/123660 for 1847e2b1a8a082929629d13ff179eb01 in 214ms, sequenceid=140, compaction requested=false 2024-11-21T00:28:13,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:13,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:13,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-21T00:28:13,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-21T00:28:13,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-21T00:28:13,161 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 368 msec 2024-11-21T00:28:13,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-21T00:28:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:13,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:13,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 375 msec 2024-11-21T00:28:13,169 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/5bf0ee6f6ecb4d97a2e99e34df37e78d is 50, key is test_row_0/A:col10/1732148893138/Put/seqid=0 2024-11-21T00:28:13,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742087_1263 (size=14541) 2024-11-21T00:28:13,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/5bf0ee6f6ecb4d97a2e99e34df37e78d 2024-11-21T00:28:13,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/8c5578136336493aac8946607e531e11 is 50, key is test_row_0/B:col10/1732148893138/Put/seqid=0 2024-11-21T00:28:13,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742088_1264 (size=12151) 2024-11-21T00:28:13,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/8c5578136336493aac8946607e531e11 2024-11-21T00:28:13,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148953270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148953273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148953277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148953278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/9ea86beb52e04293b0c7e78be30824de is 50, key is test_row_0/C:col10/1732148893138/Put/seqid=0 2024-11-21T00:28:13,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148953288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,313 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/87aba94518eb46c5aab5c97c4d6d348d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/87aba94518eb46c5aab5c97c4d6d348d 2024-11-21T00:28:13,322 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into 87aba94518eb46c5aab5c97c4d6d348d(size=12.0 K), total size for store is 23.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:13,322 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:13,322 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=12, startTime=1732148892773; duration=0sec 2024-11-21T00:28:13,322 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:13,322 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:13,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742089_1265 (size=12151) 2024-11-21T00:28:13,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148953388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148953390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148953399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148953401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148953398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-21T00:28:13,410 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-21T00:28:13,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:13,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-21T00:28:13,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-21T00:28:13,421 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:13,423 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:13,423 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-21T00:28:13,575 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-21T00:28:13,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:13,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:13,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:13,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:13,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:13,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:13,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148953593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148953594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148953605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148953606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148953608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-21T00:28:13,728 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-21T00:28:13,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:13,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:13,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:13,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:13,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:13,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:13,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/9ea86beb52e04293b0c7e78be30824de 2024-11-21T00:28:13,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/5bf0ee6f6ecb4d97a2e99e34df37e78d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5bf0ee6f6ecb4d97a2e99e34df37e78d 2024-11-21T00:28:13,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5bf0ee6f6ecb4d97a2e99e34df37e78d, entries=200, sequenceid=163, filesize=14.2 K 2024-11-21T00:28:13,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/8c5578136336493aac8946607e531e11 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/8c5578136336493aac8946607e531e11 2024-11-21T00:28:13,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/8c5578136336493aac8946607e531e11, entries=150, 
sequenceid=163, filesize=11.9 K 2024-11-21T00:28:13,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/9ea86beb52e04293b0c7e78be30824de as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/9ea86beb52e04293b0c7e78be30824de 2024-11-21T00:28:13,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/9ea86beb52e04293b0c7e78be30824de, entries=150, sequenceid=163, filesize=11.9 K 2024-11-21T00:28:13,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 1847e2b1a8a082929629d13ff179eb01 in 694ms, sequenceid=163, compaction requested=true 2024-11-21T00:28:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:28:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-21T00:28:13,855 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:13,856 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:13,867 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38967 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:13,867 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:13,867 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in 
TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:13,867 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/87aba94518eb46c5aab5c97c4d6d348d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/38c7063c11624321af45e71b4df047e9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5bf0ee6f6ecb4d97a2e99e34df37e78d] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=38.1 K 2024-11-21T00:28:13,868 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87aba94518eb46c5aab5c97c4d6d348d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732148890917 2024-11-21T00:28:13,875 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36577 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:13,875 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:13,875 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
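
The entries above show the region server picking minor compactions for stores A and C of region 1847e2b1a8a082929629d13ff179eb01 once a third store file appeared. For orientation, here is a minimal client-side sketch of requesting a compaction explicitly through the Admin API; the table and family names are copied from the log, while the connection setup and the choice to compact family "A" are illustrative assumptions, not something this test run did.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Ask the server to run a minor compaction on one column family; the call is
                // asynchronous and is queued much like the MemStoreFlusher-triggered requests above.
                admin.compact(table, Bytes.toBytes("A"));
                // A major compaction of the whole table can be requested the same way.
                admin.majorCompact(table);
            }
        }
    }
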
2024-11-21T00:28:13,875 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d8553174bef546fabd46f59ff410b652, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/341de1af37ac4ba184021a8f36e6f87d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/9ea86beb52e04293b0c7e78be30824de] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=35.7 K 2024-11-21T00:28:13,875 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38c7063c11624321af45e71b4df047e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732148892056 2024-11-21T00:28:13,876 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5bf0ee6f6ecb4d97a2e99e34df37e78d, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732148893129 2024-11-21T00:28:13,876 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting d8553174bef546fabd46f59ff410b652, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732148890917 2024-11-21T00:28:13,881 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-21T00:28:13,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:13,882 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-21T00:28:13,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:13,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:13,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:13,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:13,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:13,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:13,883 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 341de1af37ac4ba184021a8f36e6f87d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732148892056 2024-11-21T00:28:13,889 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ea86beb52e04293b0c7e78be30824de, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732148893129 2024-11-21T00:28:13,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/1249c5adeb1e4f2282f3c476152aba10 is 50, key is test_row_0/A:col10/1732148893192/Put/seqid=0 2024-11-21T00:28:13,905 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#220 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:13,906 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/a28cf042c3ce437fafce1b08945d3cae is 50, key is test_row_0/A:col10/1732148893138/Put/seqid=0 2024-11-21T00:28:13,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
as already flushing 2024-11-21T00:28:13,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:13,910 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#221 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:13,910 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/05b8864b01a4431e8a0508f14c96a0dc is 50, key is test_row_0/C:col10/1732148893138/Put/seqid=0 2024-11-21T00:28:13,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148953952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148953954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148953954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148953957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148953959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:13,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742092_1268 (size=12527) 2024-11-21T00:28:13,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742090_1266 (size=12151) 2024-11-21T00:28:13,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742091_1267 (size=12527) 2024-11-21T00:28:13,998 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/a28cf042c3ce437fafce1b08945d3cae as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a28cf042c3ce437fafce1b08945d3cae 2024-11-21T00:28:14,006 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into a28cf042c3ce437fafce1b08945d3cae(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
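
The Mutate calls rejected above ("Over memstore limit=512.0 K", each carrying about 4.7 K) are ordinary client puts that the region server refuses while the memstore sits over its blocking limit. The sketch below shows how such a put looks from the client and how a caller might back off and retry if the exception reaches it (for example with client-side retries turned down); the row key and column layout (test_row_0, families A/B/C, qualifier col10) are taken from the log, while the retry loop, pause lengths, and value bytes are illustrative assumptions. The stock HBase client already retries these responses internally.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // Back off and retry a few times when the region reports it is too busy,
                // mirroring the RegionTooBusyException responses recorded in the log.
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) {
                            throw e;
                        }
                        Thread.sleep(200L * attempt);
                    }
                }
            }
        }
    }
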
2024-11-21T00:28:14,006 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:14,006 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=13, startTime=1732148893855; duration=0sec 2024-11-21T00:28:14,006 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:14,006 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:14,007 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:14,008 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36577 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:14,008 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction (all files) 2024-11-21T00:28:14,008 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:14,008 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/89344ae3ad8740859b3e6c6e7f414387, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c249724ee31a46b7b198c0f7f13022bc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/8c5578136336493aac8946607e531e11] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=35.7 K 2024-11-21T00:28:14,012 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89344ae3ad8740859b3e6c6e7f414387, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732148890917 2024-11-21T00:28:14,013 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c249724ee31a46b7b198c0f7f13022bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732148892056 2024-11-21T00:28:14,013 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c5578136336493aac8946607e531e11, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732148893129 2024-11-21T00:28:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-21T00:28:14,036 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#222 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:14,037 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/45925116307948dc9b807d48b5a5289d is 50, key is test_row_0/B:col10/1732148893138/Put/seqid=0 2024-11-21T00:28:14,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148954058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742093_1269 (size=12527) 2024-11-21T00:28:14,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148954063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148954064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,074 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/45925116307948dc9b807d48b5a5289d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45925116307948dc9b807d48b5a5289d 2024-11-21T00:28:14,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148954075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148954079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,087 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into 45925116307948dc9b807d48b5a5289d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
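
The "Over memstore limit=512.0 K" figure repeated in these warnings is the region's blocking threshold: once the memstore grows past it, checkResources rejects further mutations until a flush catches up. In a stock configuration that threshold is the memstore flush size multiplied by the block multiplier. The snippet below is a minimal sketch of tuning those two properties; the 128 KB flush size and 4x multiplier are illustrative guesses chosen only because they multiply out to the 512 K quoted above, not values confirmed by this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches 128 KB (the default is 128 MB)...
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // ...and block new updates once it reaches 4x that size, i.e. 512 KB.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
            System.out.println("Blocking limit (bytes): " + blockingLimit);
        }
    }
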
2024-11-21T00:28:14,087 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:14,087 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=13, startTime=1732148893855; duration=0sec 2024-11-21T00:28:14,091 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:14,091 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:14,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148954265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148954272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148954273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148954279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148954286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,377 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/05b8864b01a4431e8a0508f14c96a0dc as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/05b8864b01a4431e8a0508f14c96a0dc 2024-11-21T00:28:14,379 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/1249c5adeb1e4f2282f3c476152aba10 2024-11-21T00:28:14,385 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into 05b8864b01a4431e8a0508f14c96a0dc(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
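
Each rejected Mutate above costs one small RPC while the region is throttled. As a hedged aside, writes like these can be buffered client-side and sent in larger batches through BufferedMutator; the sketch below is illustrative only (the buffer size, row-key loop, and the decision to batch at all are assumptions, not part of this test run).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.BufferedMutatorParams;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchedWrites {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            BufferedMutatorParams params =
                new BufferedMutatorParams(TableName.valueOf("TestAcidGuarantees"))
                    .writeBufferSize(1024 * 1024); // flush the client-side buffer around 1 MB (illustrative)
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 BufferedMutator mutator = conn.getBufferedMutator(params)) {
                for (int i = 0; i < 100; i++) {
                    Put put = new Put(Bytes.toBytes("test_row_" + i));
                    put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v" + i));
                    mutator.mutate(put); // buffered locally, sent to the server in larger batches
                }
                mutator.flush(); // push anything still buffered before closing
            }
        }
    }
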
2024-11-21T00:28:14,385 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:14,385 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=13, startTime=1732148893855; duration=0sec 2024-11-21T00:28:14,385 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:14,385 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:14,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/0bbc330f9f47424a8a6922d99cfa3966 is 50, key is test_row_0/B:col10/1732148893192/Put/seqid=0 2024-11-21T00:28:14,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742094_1270 (size=12151) 2024-11-21T00:28:14,420 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/0bbc330f9f47424a8a6922d99cfa3966 2024-11-21T00:28:14,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/de77c48747ae42748049445965866ed9 is 50, key is test_row_0/C:col10/1732148893192/Put/seqid=0 2024-11-21T00:28:14,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742095_1271 (size=12151) 2024-11-21T00:28:14,444 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/de77c48747ae42748049445965866ed9 2024-11-21T00:28:14,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/1249c5adeb1e4f2282f3c476152aba10 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1249c5adeb1e4f2282f3c476152aba10 2024-11-21T00:28:14,496 INFO 
[RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1249c5adeb1e4f2282f3c476152aba10, entries=150, sequenceid=179, filesize=11.9 K 2024-11-21T00:28:14,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/0bbc330f9f47424a8a6922d99cfa3966 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0bbc330f9f47424a8a6922d99cfa3966 2024-11-21T00:28:14,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-21T00:28:14,525 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0bbc330f9f47424a8a6922d99cfa3966, entries=150, sequenceid=179, filesize=11.9 K 2024-11-21T00:28:14,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/de77c48747ae42748049445965866ed9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/de77c48747ae42748049445965866ed9 2024-11-21T00:28:14,536 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/de77c48747ae42748049445965866ed9, entries=150, sequenceid=179, filesize=11.9 K 2024-11-21T00:28:14,536 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1847e2b1a8a082929629d13ff179eb01 in 654ms, sequenceid=179, compaction requested=false 2024-11-21T00:28:14,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:14,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
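The flush recorded above drains roughly 80 KB against a 512.0 K blocking limit, which suggests the test runs with a deliberately small memstore. In a standard deployment the blocking limit is approximately hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the sketch below only shows where those knobs live, with assumed values rather than anything read from this test, and in a real cluster they would normally be set in hbase-site.xml rather than in code.

// Sketch: the configuration keys behind the memstore flush and blocking behaviour.
// 512 K would be consistent with a 128 K flush size and the default multiplier of 4
// (an inference, not a value confirmed by this log).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // flush at 128 K (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x flush size
    System.out.println("blocking limit (bytes) = "
        + conf.getLong("hbase.hregion.memstore.flush.size", 0)
          * conf.getInt("hbase.hregion.memstore.block.multiplier", 4));
  }
}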
2024-11-21T00:28:14,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-21T00:28:14,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-21T00:28:14,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-21T00:28:14,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1150 sec 2024-11-21T00:28:14,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.1270 sec 2024-11-21T00:28:14,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-21T00:28:14,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:14,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:14,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:14,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:14,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:14,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:14,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:14,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/4d62c30868e14196a521a72b9e18367b is 50, key is test_row_0/A:col10/1732148894573/Put/seqid=0 2024-11-21T00:28:14,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148954591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148954590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148954591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148954594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148954595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742096_1272 (size=14541) 2024-11-21T00:28:14,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/4d62c30868e14196a521a72b9e18367b 2024-11-21T00:28:14,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/e047e79d202a46be8559e1c618efc2ca is 50, key is test_row_0/B:col10/1732148894573/Put/seqid=0 2024-11-21T00:28:14,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148954698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148954698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742097_1273 (size=12151) 2024-11-21T00:28:14,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148954700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148954700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148954903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148954904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148954904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:14,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:14,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148954905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148955099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/e047e79d202a46be8559e1c618efc2ca 2024-11-21T00:28:15,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/615abd6886434ad5a56bd857d9822a19 is 50, key is test_row_0/C:col10/1732148894573/Put/seqid=0 2024-11-21T00:28:15,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742098_1274 (size=12151) 2024-11-21T00:28:15,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148955207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148955213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148955212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148955218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-21T00:28:15,525 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-21T00:28:15,539 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-21T00:28:15,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-21T00:28:15,555 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:15,558 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:15,558 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:15,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/615abd6886434ad5a56bd857d9822a19 2024-11-21T00:28:15,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/4d62c30868e14196a521a72b9e18367b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4d62c30868e14196a521a72b9e18367b 2024-11-21T00:28:15,592 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4d62c30868e14196a521a72b9e18367b, entries=200, sequenceid=204, filesize=14.2 K 2024-11-21T00:28:15,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/e047e79d202a46be8559e1c618efc2ca as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e047e79d202a46be8559e1c618efc2ca 2024-11-21T00:28:15,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e047e79d202a46be8559e1c618efc2ca, entries=150, sequenceid=204, filesize=11.9 K 2024-11-21T00:28:15,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/615abd6886434ad5a56bd857d9822a19 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/615abd6886434ad5a56bd857d9822a19 2024-11-21T00:28:15,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/615abd6886434ad5a56bd857d9822a19, entries=150, sequenceid=204, filesize=11.9 K 2024-11-21T00:28:15,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 1847e2b1a8a082929629d13ff179eb01 in 1033ms, sequenceid=204, compaction requested=true 2024-11-21T00:28:15,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:15,607 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:15,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:15,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:15,607 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:15,608 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:15,609 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction 
(all files) 2024-11-21T00:28:15,609 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:15,609 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45925116307948dc9b807d48b5a5289d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0bbc330f9f47424a8a6922d99cfa3966, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e047e79d202a46be8559e1c618efc2ca] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=36.0 K 2024-11-21T00:28:15,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:15,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:15,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:15,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:15,611 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39219 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:15,611 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:15,611 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:15,611 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a28cf042c3ce437fafce1b08945d3cae, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1249c5adeb1e4f2282f3c476152aba10, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4d62c30868e14196a521a72b9e18367b] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=38.3 K 2024-11-21T00:28:15,612 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 45925116307948dc9b807d48b5a5289d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732148893129 2024-11-21T00:28:15,612 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting a28cf042c3ce437fafce1b08945d3cae, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732148893129 2024-11-21T00:28:15,612 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bbc330f9f47424a8a6922d99cfa3966, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732148893192 2024-11-21T00:28:15,612 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1249c5adeb1e4f2282f3c476152aba10, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732148893192 2024-11-21T00:28:15,613 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e047e79d202a46be8559e1c618efc2ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732148893954 2024-11-21T00:28:15,613 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d62c30868e14196a521a72b9e18367b, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732148893952 2024-11-21T00:28:15,620 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#228 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:15,621 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#229 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:15,621 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/ae8a4e36de544445b7423a3e02b11a56 is 50, key is test_row_0/A:col10/1732148894573/Put/seqid=0 2024-11-21T00:28:15,621 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/4dbcfc03f3bb465a81996e1f100899c5 is 50, key is test_row_0/B:col10/1732148894573/Put/seqid=0 2024-11-21T00:28:15,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-21T00:28:15,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742099_1275 (size=12629) 2024-11-21T00:28:15,668 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/4dbcfc03f3bb465a81996e1f100899c5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/4dbcfc03f3bb465a81996e1f100899c5 2024-11-21T00:28:15,674 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into 4dbcfc03f3bb465a81996e1f100899c5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
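The compaction selections logged here ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") line up with the stock thresholds: three files is the default minimum that makes a minor compaction eligible, and sixteen is the default blocking store-file count. The sketch below simply spells out those defaults for reference; in a real cluster they are server-side settings, typically placed in hbase-site.xml.

// Sketch: default store-file thresholds that drive the minor-compaction decisions above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compactionThreshold", 3); // min eligible files for a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);     // max files combined in one compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16); // block updates above this file count
  }
}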
2024-11-21T00:28:15,674 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:15,674 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=13, startTime=1732148895607; duration=0sec 2024-11-21T00:28:15,674 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:15,674 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:15,674 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:15,676 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:15,676 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:15,676 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:15,676 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/05b8864b01a4431e8a0508f14c96a0dc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/de77c48747ae42748049445965866ed9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/615abd6886434ad5a56bd857d9822a19] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=36.0 K 2024-11-21T00:28:15,676 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 05b8864b01a4431e8a0508f14c96a0dc, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732148893129 2024-11-21T00:28:15,677 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting de77c48747ae42748049445965866ed9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732148893192 2024-11-21T00:28:15,677 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 615abd6886434ad5a56bd857d9822a19, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732148893954 2024-11-21T00:28:15,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 
is added to blk_1073742100_1276 (size=12629) 2024-11-21T00:28:15,689 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/ae8a4e36de544445b7423a3e02b11a56 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ae8a4e36de544445b7423a3e02b11a56 2024-11-21T00:28:15,695 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into ae8a4e36de544445b7423a3e02b11a56(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:15,695 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:15,695 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=13, startTime=1732148895607; duration=0sec 2024-11-21T00:28:15,695 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:15,695 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:15,696 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#230 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:15,697 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/6c3c331c2f7d48eb911b6abab1a7f0bb is 50, key is test_row_0/C:col10/1732148894573/Put/seqid=0 2024-11-21T00:28:15,711 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-21T00:28:15,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:15,711 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-21T00:28:15,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:15,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:15,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:15,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:15,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:15,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:15,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:15,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:15,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742101_1277 (size=12629) 2024-11-21T00:28:15,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/6316c6f8877048bc93bd8d95f1939e66 is 50, key is test_row_0/A:col10/1732148894593/Put/seqid=0 2024-11-21T00:28:15,740 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/6c3c331c2f7d48eb911b6abab1a7f0bb as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6c3c331c2f7d48eb911b6abab1a7f0bb 2024-11-21T00:28:15,747 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into 6c3c331c2f7d48eb911b6abab1a7f0bb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
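The FlushRegionCallable running as pid=76 above is the region-server side of a master-driven FlushTableProcedure (pid=75), which this log later reports as a client-requested flush of TestAcidGuarantees. A minimal sketch of requesting such a flush through the public Admin API follows; it is illustrative only and assumes an hbase-site.xml on the classpath pointing at the cluster.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; on the server this
                // appears as a FlushTableProcedure with FlushRegionProcedure children,
                // like the pid=75/pid=76 entries in this log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }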
2024-11-21T00:28:15,747 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:15,747 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=13, startTime=1732148895609; duration=0sec 2024-11-21T00:28:15,748 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:15,748 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:15,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148955747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148955752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148955752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148955752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742102_1278 (size=12151) 2024-11-21T00:28:15,785 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/6316c6f8877048bc93bd8d95f1939e66 2024-11-21T00:28:15,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/116602729c174aa68d11ecc1c3b56d24 is 50, key is test_row_0/B:col10/1732148894593/Put/seqid=0 2024-11-21T00:28:15,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742103_1279 (size=12151) 2024-11-21T00:28:15,838 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/116602729c174aa68d11ecc1c3b56d24 2024-11-21T00:28:15,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/46e2bd47fb804bfc93abe80664d6412a is 50, key is test_row_0/C:col10/1732148894593/Put/seqid=0 2024-11-21T00:28:15,854 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-21T00:28:15,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148955858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148955859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148955859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:15,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742104_1280 (size=12151) 2024-11-21T00:28:15,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:15,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148955867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148956062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148956062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148956064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148956076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148956105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-21T00:28:16,266 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/46e2bd47fb804bfc93abe80664d6412a 2024-11-21T00:28:16,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/6316c6f8877048bc93bd8d95f1939e66 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6316c6f8877048bc93bd8d95f1939e66 2024-11-21T00:28:16,277 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6316c6f8877048bc93bd8d95f1939e66, entries=150, sequenceid=220, filesize=11.9 K 2024-11-21T00:28:16,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/116602729c174aa68d11ecc1c3b56d24 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/116602729c174aa68d11ecc1c3b56d24 2024-11-21T00:28:16,282 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/116602729c174aa68d11ecc1c3b56d24, entries=150, sequenceid=220, filesize=11.9 K 2024-11-21T00:28:16,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/46e2bd47fb804bfc93abe80664d6412a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/46e2bd47fb804bfc93abe80664d6412a 2024-11-21T00:28:16,286 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/46e2bd47fb804bfc93abe80664d6412a, entries=150, sequenceid=220, filesize=11.9 K 2024-11-21T00:28:16,287 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 1847e2b1a8a082929629d13ff179eb01 in 576ms, sequenceid=220, compaction requested=false 2024-11-21T00:28:16,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:16,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
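The repeated RegionTooBusyException warnings in this stretch of the log ("Over memstore limit=512.0 K") mean the region is rejecting writes while its memstore is above the blocking threshold (typically the flush size multiplied by the block multiplier; the 512 K figure reflects this test's deliberately small settings). The standard client retries these internally, so application code rarely sees them, but the sketch below shows one way a caller might handle the exception if client retries were dialed down enough for it to surface. The row, family, and qualifier match the log; the value, retry count, and backoff are illustrative assumptions.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                int attempts = 0;
                while (true) {
                    try {
                        table.put(put); // normally the client retries busy regions on its own
                        break;
                    } catch (RegionTooBusyException busy) {
                        // Region is over its blocking memstore size; back off and try again.
                        if (++attempts >= 5) {
                            throw busy;
                        }
                        Thread.sleep(200L * attempts);
                    }
                }
            }
        }
    }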
2024-11-21T00:28:16,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-21T00:28:16,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-21T00:28:16,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-21T00:28:16,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 730 msec 2024-11-21T00:28:16,290 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 750 msec 2024-11-21T00:28:16,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:16,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:28:16,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:16,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:16,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:16,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:16,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:16,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:16,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/826f42de597649218db7c66888b83a34 is 50, key is test_row_0/A:col10/1732148896371/Put/seqid=0 2024-11-21T00:28:16,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742105_1281 (size=14541) 2024-11-21T00:28:16,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148956382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148956386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148956386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148956386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148956486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148956490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148956490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148956490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-21T00:28:16,656 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-21T00:28:16,657 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:16,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-21T00:28:16,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-21T00:28:16,658 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:16,658 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:16,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:16,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148956689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148956694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148956694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148956694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-21T00:28:16,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/826f42de597649218db7c66888b83a34 2024-11-21T00:28:16,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/fd3f15bd608744f3af11e95301a325cc is 50, key is test_row_0/B:col10/1732148896371/Put/seqid=0 2024-11-21T00:28:16,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742106_1282 (size=12151) 2024-11-21T00:28:16,809 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,810 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-21T00:28:16,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:16,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:16,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:16,810 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:16,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:16,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:16,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-21T00:28:16,962 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-21T00:28:16,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:16,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:16,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:16,962 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:16,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:16,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:16,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148956993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148956997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148956997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:16,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:16,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148956997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:17,114 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:17,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-21T00:28:17,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:17,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,115 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:17,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:17,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:17,205 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/fd3f15bd608744f3af11e95301a325cc 2024-11-21T00:28:17,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/6764e263b8d24f2ab605e095b020d3b9 is 50, key is test_row_0/C:col10/1732148896371/Put/seqid=0 2024-11-21T00:28:17,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742107_1283 (size=12151) 2024-11-21T00:28:17,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-21T00:28:17,269 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:17,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-21T00:28:17,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:17,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:17,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:17,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:17,422 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:17,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-21T00:28:17,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:17,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:17,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:17,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:17,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:17,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148957497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:17,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:17,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148957499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:17,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:17,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148957500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:17,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:17,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148957502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:17,574 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:17,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-21T00:28:17,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:17,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:17,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:17,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:17,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/6764e263b8d24f2ab605e095b020d3b9 2024-11-21T00:28:17,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/826f42de597649218db7c66888b83a34 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/826f42de597649218db7c66888b83a34 2024-11-21T00:28:17,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/826f42de597649218db7c66888b83a34, entries=200, sequenceid=247, filesize=14.2 K 2024-11-21T00:28:17,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/fd3f15bd608744f3af11e95301a325cc as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fd3f15bd608744f3af11e95301a325cc 2024-11-21T00:28:17,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fd3f15bd608744f3af11e95301a325cc, entries=150, sequenceid=247, filesize=11.9 K 2024-11-21T00:28:17,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/6764e263b8d24f2ab605e095b020d3b9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6764e263b8d24f2ab605e095b020d3b9 2024-11-21T00:28:17,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6764e263b8d24f2ab605e095b020d3b9, entries=150, sequenceid=247, filesize=11.9 K 2024-11-21T00:28:17,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 1847e2b1a8a082929629d13ff179eb01 in 1284ms, sequenceid=247, compaction requested=true 2024-11-21T00:28:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:17,656 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:17,656 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:17,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:17,657 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:17,657 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39321 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:17,657 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction (all files) 2024-11-21T00:28:17,657 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:17,657 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,657 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:17,657 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/4dbcfc03f3bb465a81996e1f100899c5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/116602729c174aa68d11ecc1c3b56d24, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fd3f15bd608744f3af11e95301a325cc] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=36.1 K 2024-11-21T00:28:17,657 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ae8a4e36de544445b7423a3e02b11a56, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6316c6f8877048bc93bd8d95f1939e66, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/826f42de597649218db7c66888b83a34] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=38.4 K 2024-11-21T00:28:17,658 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dbcfc03f3bb465a81996e1f100899c5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732148893954 2024-11-21T00:28:17,658 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae8a4e36de544445b7423a3e02b11a56, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732148893954 2024-11-21T00:28:17,658 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6316c6f8877048bc93bd8d95f1939e66, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732148894580 2024-11-21T00:28:17,658 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 116602729c174aa68d11ecc1c3b56d24, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732148894580 2024-11-21T00:28:17,658 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 826f42de597649218db7c66888b83a34, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732148895751 2024-11-21T00:28:17,658 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting fd3f15bd608744f3af11e95301a325cc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732148896371 2024-11-21T00:28:17,666 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#237 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:17,666 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#238 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:17,666 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/d9d4045e5ffb4ac98acb6a09bebf8c7e is 50, key is test_row_0/B:col10/1732148896371/Put/seqid=0 2024-11-21T00:28:17,666 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/f8b436ed5c5a49508c86975d58407053 is 50, key is test_row_0/A:col10/1732148896371/Put/seqid=0 2024-11-21T00:28:17,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742109_1285 (size=12731) 2024-11-21T00:28:17,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742108_1284 (size=12731) 2024-11-21T00:28:17,695 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/d9d4045e5ffb4ac98acb6a09bebf8c7e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d9d4045e5ffb4ac98acb6a09bebf8c7e 2024-11-21T00:28:17,703 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/f8b436ed5c5a49508c86975d58407053 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f8b436ed5c5a49508c86975d58407053 2024-11-21T00:28:17,704 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into d9d4045e5ffb4ac98acb6a09bebf8c7e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:17,704 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:17,704 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=13, startTime=1732148897656; duration=0sec 2024-11-21T00:28:17,705 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:17,705 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:17,705 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:17,717 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:17,717 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:17,717 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,717 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6c3c331c2f7d48eb911b6abab1a7f0bb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/46e2bd47fb804bfc93abe80664d6412a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6764e263b8d24f2ab605e095b020d3b9] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=36.1 K 2024-11-21T00:28:17,718 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c3c331c2f7d48eb911b6abab1a7f0bb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732148893954 2024-11-21T00:28:17,720 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 46e2bd47fb804bfc93abe80664d6412a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732148894580 2024-11-21T00:28:17,723 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 6764e263b8d24f2ab605e095b020d3b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732148896371 2024-11-21T00:28:17,725 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into f8b436ed5c5a49508c86975d58407053(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:17,726 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:17,726 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=13, startTime=1732148897656; duration=0sec 2024-11-21T00:28:17,726 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:17,726 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:17,727 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:17,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-21T00:28:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:17,728 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:28:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:17,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/f84abddbf00245539c73cba9dc173505 is 50, key is test_row_0/A:col10/1732148896385/Put/seqid=0 2024-11-21T00:28:17,733 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#239 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:17,733 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/eda84e91db574a3ca1b7644b2e47e0e5 is 50, key is test_row_0/C:col10/1732148896371/Put/seqid=0 2024-11-21T00:28:17,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742110_1286 (size=12251) 2024-11-21T00:28:17,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742111_1287 (size=12731) 2024-11-21T00:28:17,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-21T00:28:18,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:18,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
as already flushing 2024-11-21T00:28:18,149 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/f84abddbf00245539c73cba9dc173505 2024-11-21T00:28:18,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/5ad3057cff7d404baac91e4e68e3e070 is 50, key is test_row_0/B:col10/1732148896385/Put/seqid=0 2024-11-21T00:28:18,163 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/eda84e91db574a3ca1b7644b2e47e0e5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eda84e91db574a3ca1b7644b2e47e0e5 2024-11-21T00:28:18,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742112_1288 (size=12251) 2024-11-21T00:28:18,171 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into eda84e91db574a3ca1b7644b2e47e0e5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:18,171 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:18,171 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=13, startTime=1732148897656; duration=0sec 2024-11-21T00:28:18,171 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:18,171 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:18,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:18,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148958194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:18,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:18,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148958296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:18,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:18,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148958499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:18,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:18,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148958505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:18,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:18,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148958507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:18,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148958509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:18,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:18,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148958510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:18,571 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/5ad3057cff7d404baac91e4e68e3e070 2024-11-21T00:28:18,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/57aa7341429a4c6ba9de44babd0e5b99 is 50, key is test_row_0/C:col10/1732148896385/Put/seqid=0 2024-11-21T00:28:18,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742113_1289 (size=12251) 2024-11-21T00:28:18,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-21T00:28:18,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:18,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148958807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:19,004 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/57aa7341429a4c6ba9de44babd0e5b99 2024-11-21T00:28:19,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/f84abddbf00245539c73cba9dc173505 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f84abddbf00245539c73cba9dc173505 2024-11-21T00:28:19,014 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f84abddbf00245539c73cba9dc173505, entries=150, sequenceid=261, filesize=12.0 K 2024-11-21T00:28:19,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/5ad3057cff7d404baac91e4e68e3e070 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5ad3057cff7d404baac91e4e68e3e070 2024-11-21T00:28:19,021 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5ad3057cff7d404baac91e4e68e3e070, entries=150, sequenceid=261, filesize=12.0 K 2024-11-21T00:28:19,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/57aa7341429a4c6ba9de44babd0e5b99 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/57aa7341429a4c6ba9de44babd0e5b99 2024-11-21T00:28:19,027 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/57aa7341429a4c6ba9de44babd0e5b99, entries=150, sequenceid=261, filesize=12.0 K 2024-11-21T00:28:19,028 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1847e2b1a8a082929629d13ff179eb01 in 1300ms, sequenceid=261, compaction requested=false 2024-11-21T00:28:19,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:19,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:19,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-21T00:28:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-21T00:28:19,031 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-21T00:28:19,031 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3710 sec 2024-11-21T00:28:19,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 2.3750 sec 2024-11-21T00:28:19,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:19,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:28:19,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:19,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:19,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:19,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:19,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:19,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:19,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/ec37e0bfba92478caefd3dc36b072dc9 is 50, key is test_row_0/A:col10/1732148899311/Put/seqid=0 2024-11-21T00:28:19,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742114_1290 (size=12301) 2024-11-21T00:28:19,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/ec37e0bfba92478caefd3dc36b072dc9 2024-11-21T00:28:19,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/45a0bcfb55ff417195cc641c5c6878a4 is 50, key is test_row_0/B:col10/1732148899311/Put/seqid=0 2024-11-21T00:28:19,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:19,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148959339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:19,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742115_1291 (size=12301) 2024-11-21T00:28:19,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:19,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148959442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:19,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:19,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148959645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:19,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/45a0bcfb55ff417195cc641c5c6878a4 2024-11-21T00:28:19,745 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/f2ae4160d26449148d9408363471d438 is 50, key is test_row_0/C:col10/1732148899311/Put/seqid=0 2024-11-21T00:28:19,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742116_1292 (size=12301) 2024-11-21T00:28:19,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/f2ae4160d26449148d9408363471d438 2024-11-21T00:28:19,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/ec37e0bfba92478caefd3dc36b072dc9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ec37e0bfba92478caefd3dc36b072dc9 2024-11-21T00:28:19,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ec37e0bfba92478caefd3dc36b072dc9, entries=150, sequenceid=287, filesize=12.0 K 2024-11-21T00:28:19,764 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/45a0bcfb55ff417195cc641c5c6878a4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45a0bcfb55ff417195cc641c5c6878a4 2024-11-21T00:28:19,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45a0bcfb55ff417195cc641c5c6878a4, entries=150, sequenceid=287, filesize=12.0 K 2024-11-21T00:28:19,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/f2ae4160d26449148d9408363471d438 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f2ae4160d26449148d9408363471d438 2024-11-21T00:28:19,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f2ae4160d26449148d9408363471d438, entries=150, sequenceid=287, filesize=12.0 K 2024-11-21T00:28:19,776 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 1847e2b1a8a082929629d13ff179eb01 in 464ms, sequenceid=287, compaction requested=true 2024-11-21T00:28:19,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:19,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:19,776 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:19,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:19,776 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:19,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:19,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:19,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:19,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small 
Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:19,777 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:19,777 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:19,778 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:19,778 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:19,778 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction (all files) 2024-11-21T00:28:19,778 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:19,778 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f8b436ed5c5a49508c86975d58407053, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f84abddbf00245539c73cba9dc173505, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ec37e0bfba92478caefd3dc36b072dc9] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=36.4 K 2024-11-21T00:28:19,778 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d9d4045e5ffb4ac98acb6a09bebf8c7e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5ad3057cff7d404baac91e4e68e3e070, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45a0bcfb55ff417195cc641c5c6878a4] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=36.4 K 2024-11-21T00:28:19,778 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting d9d4045e5ffb4ac98acb6a09bebf8c7e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732148896371 2024-11-21T00:28:19,778 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting f8b436ed5c5a49508c86975d58407053, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732148896371 2024-11-21T00:28:19,779 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting f84abddbf00245539c73cba9dc173505, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732148896381 2024-11-21T00:28:19,779 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ad3057cff7d404baac91e4e68e3e070, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732148896381 2024-11-21T00:28:19,779 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 45a0bcfb55ff417195cc641c5c6878a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732148898181 2024-11-21T00:28:19,779 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec37e0bfba92478caefd3dc36b072dc9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732148898181 2024-11-21T00:28:19,791 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#246 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:19,791 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/aa760f13b6034882b3e141a1c9b840dd is 50, key is test_row_0/B:col10/1732148899311/Put/seqid=0 2024-11-21T00:28:19,795 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#247 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:19,796 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/2aabb07c9aab4f00a6e2637309575b89 is 50, key is test_row_0/A:col10/1732148899311/Put/seqid=0 2024-11-21T00:28:19,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742117_1293 (size=12983) 2024-11-21T00:28:19,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742118_1294 (size=12983) 2024-11-21T00:28:19,832 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/aa760f13b6034882b3e141a1c9b840dd as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/aa760f13b6034882b3e141a1c9b840dd 2024-11-21T00:28:19,839 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into aa760f13b6034882b3e141a1c9b840dd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:19,839 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:19,839 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=13, startTime=1732148899776; duration=0sec 2024-11-21T00:28:19,839 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:19,839 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:19,839 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:19,840 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:19,840 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:19,840 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:19,841 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eda84e91db574a3ca1b7644b2e47e0e5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/57aa7341429a4c6ba9de44babd0e5b99, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f2ae4160d26449148d9408363471d438] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=36.4 K 2024-11-21T00:28:19,841 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting eda84e91db574a3ca1b7644b2e47e0e5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732148896371 2024-11-21T00:28:19,841 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 57aa7341429a4c6ba9de44babd0e5b99, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732148896381 2024-11-21T00:28:19,846 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f2ae4160d26449148d9408363471d438, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732148898181 2024-11-21T00:28:19,857 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#248 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:19,857 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/44000524fb624e3aaacb56a7a45b2214 is 50, key is test_row_0/C:col10/1732148899311/Put/seqid=0 2024-11-21T00:28:19,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742119_1295 (size=12983) 2024-11-21T00:28:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:19,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:28:19,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:19,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:19,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:19,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:19,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:19,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:19,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/73c19d54a5d34dc2baa579eab958f4ef is 50, key is test_row_0/A:col10/1732148899335/Put/seqid=0 2024-11-21T00:28:19,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742120_1296 (size=12301) 2024-11-21T00:28:20,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:20,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148960004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:20,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:20,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148960107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:20,260 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/2aabb07c9aab4f00a6e2637309575b89 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/2aabb07c9aab4f00a6e2637309575b89 2024-11-21T00:28:20,266 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into 2aabb07c9aab4f00a6e2637309575b89(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:20,266 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:20,266 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=13, startTime=1732148899776; duration=0sec 2024-11-21T00:28:20,266 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:20,266 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:20,269 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/44000524fb624e3aaacb56a7a45b2214 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/44000524fb624e3aaacb56a7a45b2214 2024-11-21T00:28:20,278 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into 44000524fb624e3aaacb56a7a45b2214(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:20,278 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:20,278 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=13, startTime=1732148899776; duration=0sec 2024-11-21T00:28:20,278 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:20,278 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:20,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:20,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148960311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:20,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/73c19d54a5d34dc2baa579eab958f4ef 2024-11-21T00:28:20,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/487af1b88b7e4d8d9f7c96f504c9a067 is 50, key is test_row_0/B:col10/1732148899335/Put/seqid=0 2024-11-21T00:28:20,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742121_1297 (size=12301) 2024-11-21T00:28:20,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/487af1b88b7e4d8d9f7c96f504c9a067 2024-11-21T00:28:20,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/c33df9504dad4a298ddf8f6b973010c0 is 50, key is test_row_0/C:col10/1732148899335/Put/seqid=0 2024-11-21T00:28:20,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742122_1298 (size=12301) 2024-11-21T00:28:20,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:20,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148960510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:20,512 DEBUG [Thread-1086 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4126 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:20,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:20,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148960516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:20,517 DEBUG [Thread-1082 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:20,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:20,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148960517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:20,519 DEBUG [Thread-1080 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:20,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:20,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148960525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:20,526 DEBUG [Thread-1088 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:20,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:20,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148960613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:20,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-21T00:28:20,765 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-21T00:28:20,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:20,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-21T00:28:20,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-21T00:28:20,768 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:20,768 INFO [PEWorker-5 {}] 
procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:20,768 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:20,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/c33df9504dad4a298ddf8f6b973010c0 2024-11-21T00:28:20,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/73c19d54a5d34dc2baa579eab958f4ef as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/73c19d54a5d34dc2baa579eab958f4ef 2024-11-21T00:28:20,819 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/73c19d54a5d34dc2baa579eab958f4ef, entries=150, sequenceid=300, filesize=12.0 K 2024-11-21T00:28:20,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/487af1b88b7e4d8d9f7c96f504c9a067 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/487af1b88b7e4d8d9f7c96f504c9a067 2024-11-21T00:28:20,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/487af1b88b7e4d8d9f7c96f504c9a067, entries=150, sequenceid=300, filesize=12.0 K 2024-11-21T00:28:20,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/c33df9504dad4a298ddf8f6b973010c0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c33df9504dad4a298ddf8f6b973010c0 2024-11-21T00:28:20,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c33df9504dad4a298ddf8f6b973010c0, entries=150, sequenceid=300, filesize=12.0 K 2024-11-21T00:28:20,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1847e2b1a8a082929629d13ff179eb01 in 882ms, sequenceid=300, compaction requested=false 2024-11-21T00:28:20,831 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:20,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-21T00:28:20,920 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:20,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-21T00:28:20,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:20,920 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-21T00:28:20,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:20,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:20,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:20,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:20,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:20,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:20,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/d8d87e3cbff4472ab584087175b9eab3 is 50, key is test_row_0/A:col10/1732148900001/Put/seqid=0 2024-11-21T00:28:20,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742123_1299 (size=12301) 2024-11-21T00:28:20,929 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/d8d87e3cbff4472ab584087175b9eab3 2024-11-21T00:28:20,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/03da2f6ad48a400ca9a6901130d5ba72 is 50, key is test_row_0/B:col10/1732148900001/Put/seqid=0 2024-11-21T00:28:20,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742124_1300 (size=12301) 2024-11-21T00:28:20,942 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/03da2f6ad48a400ca9a6901130d5ba72 2024-11-21T00:28:20,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/7eecc53573484b87b8c8b361d6f97fef is 50, key is test_row_0/C:col10/1732148900001/Put/seqid=0 2024-11-21T00:28:20,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742125_1301 (size=12301) 2024-11-21T00:28:20,960 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/7eecc53573484b87b8c8b361d6f97fef 2024-11-21T00:28:20,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/d8d87e3cbff4472ab584087175b9eab3 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d8d87e3cbff4472ab584087175b9eab3 2024-11-21T00:28:20,968 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d8d87e3cbff4472ab584087175b9eab3, entries=150, sequenceid=326, filesize=12.0 K 2024-11-21T00:28:20,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/03da2f6ad48a400ca9a6901130d5ba72 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/03da2f6ad48a400ca9a6901130d5ba72 2024-11-21T00:28:20,973 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/03da2f6ad48a400ca9a6901130d5ba72, entries=150, sequenceid=326, filesize=12.0 K 2024-11-21T00:28:20,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/7eecc53573484b87b8c8b361d6f97fef as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7eecc53573484b87b8c8b361d6f97fef 2024-11-21T00:28:20,980 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7eecc53573484b87b8c8b361d6f97fef, entries=150, sequenceid=326, filesize=12.0 K 2024-11-21T00:28:20,981 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 1847e2b1a8a082929629d13ff179eb01 in 61ms, sequenceid=326, compaction requested=true 2024-11-21T00:28:20,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:20,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:20,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-21T00:28:20,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-21T00:28:20,983 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-21T00:28:20,983 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 214 msec 2024-11-21T00:28:20,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 218 msec 2024-11-21T00:28:21,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-21T00:28:21,069 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-21T00:28:21,070 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:21,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-21T00:28:21,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-21T00:28:21,072 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:21,072 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:21,072 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:21,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:21,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:28:21,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:21,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:21,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:21,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-21T00:28:21,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:21,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:21,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/b31f0ddc4a004dc583b10348306effb0 is 50, key is test_row_0/A:col10/1732148901132/Put/seqid=0 2024-11-21T00:28:21,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742126_1302 (size=14741) 2024-11-21T00:28:21,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-21T00:28:21,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:21,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148961215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:21,223 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:21,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-21T00:28:21,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:21,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:21,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:21,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:21,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:21,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148961319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:21,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-21T00:28:21,376 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:21,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-21T00:28:21,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:21,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:21,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:21,377 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:21,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:21,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148961523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:21,529 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:21,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-21T00:28:21,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:21,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:21,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:21,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/b31f0ddc4a004dc583b10348306effb0 2024-11-21T00:28:21,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/318fbafdd7984f5eb79b04ef29f57c4d is 50, key is test_row_0/B:col10/1732148901132/Put/seqid=0 2024-11-21T00:28:21,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742127_1303 (size=12301) 2024-11-21T00:28:21,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/318fbafdd7984f5eb79b04ef29f57c4d 2024-11-21T00:28:21,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/06990b80b1bf444e82aa668792c3ad47 is 50, key is test_row_0/C:col10/1732148901132/Put/seqid=0 2024-11-21T00:28:21,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742128_1304 (size=12301) 2024-11-21T00:28:21,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 
2024-11-21T00:28:21,689 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:21,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-21T00:28:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:21,690 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
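The pid=82 failures above all follow one pattern: the master's RSProcedureDispatcher hands a FlushRegionCallable to the region server, but the region is still busy with the flush that MemStoreFlusher.0 started, so HRegion reports "NOT flushing ... as already flushing", the callable fails with the IOException shown, and the master re-dispatches the procedure a moment later. The snippet below only illustrates that control flow as it appears in these entries, not the actual FlushRegionCallable source; RegionLike and its methods are hypothetical stand-ins.

    import java.io.IOException;

    // Hypothetical stand-in for the region, used only to illustrate the retry loop above.
    interface RegionLike {
      boolean isAlreadyFlushing();
      void flush() throws IOException;
      String describe();
    }

    final class RemoteFlushIllustration {
      // While a memstore-pressure flush is still running, the remotely dispatched flush
      // cannot proceed; it fails with an IOException and the master simply retries the
      // procedure until an attempt finds the region idle (which happens at 00:28:22,148).
      static void runRemoteFlush(RegionLike region) throws IOException {
        if (region.isAlreadyFlushing()) {
          throw new IOException("Unable to complete flush " + region.describe());
        }
        region.flush();
      }
    }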
2024-11-21T00:28:21,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:21,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148961828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:21,842 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:21,842 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-21T00:28:21,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:21,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:21,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:21,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
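The RegionTooBusyException warnings are the region server refusing new mutations once this region's memstore passes its blocking limit (512 K in this run); the matching CallRunner entries show the client RPC being failed against its deadline. The normal HBase client retries these internally, so the sketch below is just an explicit version of that backoff around a bare Table.put; the table, row, and column names mirror the test, the retry count and sleeps are arbitrary, and depending on client retry settings the exception may instead surface wrapped in a RetriesExhaustedWithDetailsException.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BusyRegionPutExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);               // rejected with RegionTooBusyException while over the limit
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e;                    // give up after a few attempts
              }
              Thread.sleep(100L * attempt); // simple linear backoff while the flush catches up
            }
          }
        }
      }
    }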
2024-11-21T00:28:21,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,995 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:21,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-21T00:28:21,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:21,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:21,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:21,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:21,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
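The "Over memstore limit=512.0 K" figure is the region's blocking threshold: writes are rejected once the memstore reaches hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. This test evidently runs with a far smaller flush size than the 128 MB default so the limit is hit quickly; the snippet below only shows where the two knobs live, with what are believed to be the stock defaults rather than the test's own settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class MemstoreLimitConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a region flush is triggered (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are blocked with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier (default multiplier: 4).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Updates blocked above " + blockingLimit + " bytes per region");
      }
    }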
2024-11-21T00:28:22,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/06990b80b1bf444e82aa668792c3ad47 2024-11-21T00:28:22,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/b31f0ddc4a004dc583b10348306effb0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/b31f0ddc4a004dc583b10348306effb0 2024-11-21T00:28:22,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/b31f0ddc4a004dc583b10348306effb0, entries=200, sequenceid=337, filesize=14.4 K 2024-11-21T00:28:22,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/318fbafdd7984f5eb79b04ef29f57c4d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/318fbafdd7984f5eb79b04ef29f57c4d 2024-11-21T00:28:22,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/318fbafdd7984f5eb79b04ef29f57c4d, entries=150, sequenceid=337, filesize=12.0 K 2024-11-21T00:28:22,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/06990b80b1bf444e82aa668792c3ad47 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/06990b80b1bf444e82aa668792c3ad47 2024-11-21T00:28:22,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/06990b80b1bf444e82aa668792c3ad47, entries=150, sequenceid=337, filesize=12.0 K 2024-11-21T00:28:22,062 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 1847e2b1a8a082929629d13ff179eb01 in 929ms, sequenceid=337, compaction requested=true 2024-11-21T00:28:22,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:22,062 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:22,064 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of 
size 52326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:22,064 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:22,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:22,064 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:22,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:22,064 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:22,064 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/2aabb07c9aab4f00a6e2637309575b89, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/73c19d54a5d34dc2baa579eab958f4ef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d8d87e3cbff4472ab584087175b9eab3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/b31f0ddc4a004dc583b10348306effb0] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=51.1 K 2024-11-21T00:28:22,065 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 2aabb07c9aab4f00a6e2637309575b89, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732148898181 2024-11-21T00:28:22,065 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 73c19d54a5d34dc2baa579eab958f4ef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732148899326 2024-11-21T00:28:22,066 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting d8d87e3cbff4472ab584087175b9eab3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732148899998 2024-11-21T00:28:22,066 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:22,066 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b31f0ddc4a004dc583b10348306effb0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732148901124 2024-11-21T00:28:22,066 DEBUG 
[RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction (all files) 2024-11-21T00:28:22,066 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:22,066 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/aa760f13b6034882b3e141a1c9b840dd, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/487af1b88b7e4d8d9f7c96f504c9a067, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/03da2f6ad48a400ca9a6901130d5ba72, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/318fbafdd7984f5eb79b04ef29f57c4d] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=48.7 K 2024-11-21T00:28:22,067 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa760f13b6034882b3e141a1c9b840dd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732148898181 2024-11-21T00:28:22,068 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 487af1b88b7e4d8d9f7c96f504c9a067, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732148899326 2024-11-21T00:28:22,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:22,069 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03da2f6ad48a400ca9a6901130d5ba72, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732148899998 2024-11-21T00:28:22,069 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 318fbafdd7984f5eb79b04ef29f57c4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732148901124 2024-11-21T00:28:22,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:22,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:22,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:22,076 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#258 average throughput is 
3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:22,076 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/fdeba79bca6b4ffcb6aae12fec202127 is 50, key is test_row_0/A:col10/1732148901132/Put/seqid=0 2024-11-21T00:28:22,085 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#259 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:22,085 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/f8872eaff8d242b8974677e6cc9b44c8 is 50, key is test_row_0/B:col10/1732148901132/Put/seqid=0 2024-11-21T00:28:22,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742129_1305 (size=13119) 2024-11-21T00:28:22,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742130_1306 (size=13119) 2024-11-21T00:28:22,117 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/f8872eaff8d242b8974677e6cc9b44c8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8872eaff8d242b8974677e6cc9b44c8 2024-11-21T00:28:22,123 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into f8872eaff8d242b8974677e6cc9b44c8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
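"Exploring compaction algorithm has selected 4 files of size 52326 ... with 3 in ratio" refers to the size-ratio test that HBase's size-based compaction policies apply when picking store files for a minor compaction. The stand-alone sketch below captures the idea of that check but is not the ExploringCompactionPolicy source; the 1.2 ratio is the usual default, and the example sizes only approximate the four A-store files listed above.

    import java.util.List;

    public final class CompactionRatioSketch {
      // A candidate set is "in ratio" when no single file is larger than ratio times the
      // combined size of the other files in the set; oversized files are left for a later,
      // larger compaction instead of being rewritten repeatedly.
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Approximately the four A-store files chosen above (totalSize ~51.1 K).
        List<Long> sizes = List.of(13005L, 12301L, 12301L, 14719L);
        System.out.println(filesInRatio(sizes, 1.2)); // true -> eligible for minor compaction
      }
    }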
2024-11-21T00:28:22,123 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:22,123 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=12, startTime=1732148902064; duration=0sec 2024-11-21T00:28:22,123 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:22,123 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:22,123 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:22,125 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:22,125 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:22,125 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:22,125 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/44000524fb624e3aaacb56a7a45b2214, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c33df9504dad4a298ddf8f6b973010c0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7eecc53573484b87b8c8b361d6f97fef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/06990b80b1bf444e82aa668792c3ad47] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=48.7 K 2024-11-21T00:28:22,125 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44000524fb624e3aaacb56a7a45b2214, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732148898181 2024-11-21T00:28:22,125 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c33df9504dad4a298ddf8f6b973010c0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732148899326 2024-11-21T00:28:22,126 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7eecc53573484b87b8c8b361d6f97fef, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732148899998 2024-11-21T00:28:22,126 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06990b80b1bf444e82aa668792c3ad47, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732148901124 2024-11-21T00:28:22,136 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#260 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:22,137 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/6364f931f05645ff93ce26494b93b3ae is 50, key is test_row_0/C:col10/1732148901132/Put/seqid=0 2024-11-21T00:28:22,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742131_1307 (size=13119) 2024-11-21T00:28:22,147 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:22,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-21T00:28:22,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
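The PressureAwareThroughputController lines show each compaction reporting its average write rate against a current cap of 50.00 MB/second; under flush or compaction pressure the controller raises that cap. If the throttling itself needs tuning, the server-side settings involved are sketched below; the key names match recent 2.x releases and the values shown are believed to be the defaults, so check them against the exact version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionThroughputConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Cap applied when there is little pressure (the "total limit is 50.00 MB/second" above).
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // Cap the controller ramps up to as memstore/compaction pressure grows.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        // To disable compaction throttling entirely, the controller class can be swapped out:
        // conf.set("hbase.regionserver.throughput.controller",
        //     "org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController");
        System.out.println(conf.get("hbase.hstore.compaction.throughput.lower.bound"));
      }
    }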
2024-11-21T00:28:22,148 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:28:22,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:22,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:22,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:22,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:22,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:22,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:22,155 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/6364f931f05645ff93ce26494b93b3ae as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6364f931f05645ff93ce26494b93b3ae 2024-11-21T00:28:22,161 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into 6364f931f05645ff93ce26494b93b3ae(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
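Both the flushes and the compactions in this run follow the same two-step pattern visible in the "Committing ... as ..." lines: the new HFile is written under the region's .tmp directory first and only then moved into the column-family directory, so readers never observe a partially written file. The sketch below reduces that commit step to a plain HDFS rename; the paths are shortened placeholders, and the real HRegionFileSystem commit adds validation and retry handling around this.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class CommitStoreFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder paths standing in for a flushed HFile and its final location.
        Path tmpFile = new Path("/hbase/data/default/TestAcidGuarantees/<region>/.tmp/A/<hfile>");
        Path committed = new Path("/hbase/data/default/TestAcidGuarantees/<region>/A/<hfile>");
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38105"), conf);
        // Within one filesystem the move is a metadata-only rename, which is what makes the
        // commit cheap and effectively atomic for readers.
        if (!fs.rename(tmpFile, committed)) {
          throw new IllegalStateException("Commit rename failed for " + tmpFile);
        }
      }
    }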
2024-11-21T00:28:22,161 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:22,161 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=12, startTime=1732148902069; duration=0sec 2024-11-21T00:28:22,162 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:22,162 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:22,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/571f6f7c4cd1415bbb8f906de590c805 is 50, key is test_row_0/A:col10/1732148901212/Put/seqid=0 2024-11-21T00:28:22,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-21T00:28:22,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742132_1308 (size=12301) 2024-11-21T00:28:22,186 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/571f6f7c4cd1415bbb8f906de590c805 2024-11-21T00:28:22,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/90c105c8c5ed4e81b0ea3b7d05c477ca is 50, key is test_row_0/B:col10/1732148901212/Put/seqid=0 2024-11-21T00:28:22,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742133_1309 (size=12301) 2024-11-21T00:28:22,211 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/90c105c8c5ed4e81b0ea3b7d05c477ca 2024-11-21T00:28:22,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/d1fb5f03a65b4772b1a910f41e8a3874 is 50, key is test_row_0/C:col10/1732148901212/Put/seqid=0 2024-11-21T00:28:22,222 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742134_1310 (size=12301) 2024-11-21T00:28:22,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:22,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:22,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:22,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148962362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:22,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:22,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148962466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:22,500 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/fdeba79bca6b4ffcb6aae12fec202127 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/fdeba79bca6b4ffcb6aae12fec202127 2024-11-21T00:28:22,504 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into fdeba79bca6b4ffcb6aae12fec202127(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:22,505 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:22,505 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=12, startTime=1732148902062; duration=0sec 2024-11-21T00:28:22,505 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:22,505 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:22,623 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/d1fb5f03a65b4772b1a910f41e8a3874 2024-11-21T00:28:22,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/571f6f7c4cd1415bbb8f906de590c805 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/571f6f7c4cd1415bbb8f906de590c805 2024-11-21T00:28:22,635 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/571f6f7c4cd1415bbb8f906de590c805, entries=150, sequenceid=363, filesize=12.0 K 2024-11-21T00:28:22,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/90c105c8c5ed4e81b0ea3b7d05c477ca as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/90c105c8c5ed4e81b0ea3b7d05c477ca 2024-11-21T00:28:22,640 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/90c105c8c5ed4e81b0ea3b7d05c477ca, entries=150, sequenceid=363, filesize=12.0 K 2024-11-21T00:28:22,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/d1fb5f03a65b4772b1a910f41e8a3874 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d1fb5f03a65b4772b1a910f41e8a3874 2024-11-21T00:28:22,646 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d1fb5f03a65b4772b1a910f41e8a3874, entries=150, sequenceid=363, filesize=12.0 K 2024-11-21T00:28:22,647 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 1847e2b1a8a082929629d13ff179eb01 in 499ms, sequenceid=363, compaction requested=false 2024-11-21T00:28:22,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:22,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:22,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-21T00:28:22,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-21T00:28:22,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-21T00:28:22,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5760 sec 2024-11-21T00:28:22,651 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.5800 sec 2024-11-21T00:28:22,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:22,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:28:22,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:22,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:22,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:22,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:22,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:22,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:22,689 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/9af23cabe71f4582b72af65fc9e0b0d9 is 50, key is test_row_0/A:col10/1732148902669/Put/seqid=0 2024-11-21T00:28:22,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742135_1311 (size=12301) 2024-11-21T00:28:22,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:22,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148962738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:22,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:22,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148962842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:23,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:23,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148963046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:23,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/9af23cabe71f4582b72af65fc9e0b0d9 2024-11-21T00:28:23,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/c4f6f04100a1425aa587c9b20605098c is 50, key is test_row_0/B:col10/1732148902669/Put/seqid=0 2024-11-21T00:28:23,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742136_1312 (size=12301) 2024-11-21T00:28:23,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-21T00:28:23,175 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-21T00:28:23,177 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:23,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-21T00:28:23,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-21T00:28:23,178 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:23,180 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:23,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-21T00:28:23,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-21T00:28:23,331 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:23,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-21T00:28:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:23,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:23,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148963352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:23,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-21T00:28:23,485 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:23,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-21T00:28:23,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:23,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:23,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:23,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:23,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/c4f6f04100a1425aa587c9b20605098c 2024-11-21T00:28:23,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/dd9ad4da8ee4495e89bcb5efcc81dbc2 is 50, key is test_row_0/C:col10/1732148902669/Put/seqid=0 2024-11-21T00:28:23,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742137_1313 (size=12301) 2024-11-21T00:28:23,639 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:23,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-21T00:28:23,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:23,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:23,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:23,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:23,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-21T00:28:23,799 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:23,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-21T00:28:23,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:23,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:23,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:23,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:23,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148963860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:23,963 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:23,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-21T00:28:23,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:23,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:23,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:23,963 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:23,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/dd9ad4da8ee4495e89bcb5efcc81dbc2 2024-11-21T00:28:23,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/9af23cabe71f4582b72af65fc9e0b0d9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/9af23cabe71f4582b72af65fc9e0b0d9 2024-11-21T00:28:24,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/9af23cabe71f4582b72af65fc9e0b0d9, entries=150, sequenceid=377, filesize=12.0 K 2024-11-21T00:28:24,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/c4f6f04100a1425aa587c9b20605098c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c4f6f04100a1425aa587c9b20605098c 2024-11-21T00:28:24,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c4f6f04100a1425aa587c9b20605098c, entries=150, 
sequenceid=377, filesize=12.0 K 2024-11-21T00:28:24,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/dd9ad4da8ee4495e89bcb5efcc81dbc2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/dd9ad4da8ee4495e89bcb5efcc81dbc2 2024-11-21T00:28:24,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/dd9ad4da8ee4495e89bcb5efcc81dbc2, entries=150, sequenceid=377, filesize=12.0 K 2024-11-21T00:28:24,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1847e2b1a8a082929629d13ff179eb01 in 1385ms, sequenceid=377, compaction requested=true 2024-11-21T00:28:24,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:24,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:24,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:24,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:24,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:28:24,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:24,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-21T00:28:24,056 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:24,057 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:24,059 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:24,059 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:24,059 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in 
TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:24,059 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/fdeba79bca6b4ffcb6aae12fec202127, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/571f6f7c4cd1415bbb8f906de590c805, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/9af23cabe71f4582b72af65fc9e0b0d9] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=36.8 K 2024-11-21T00:28:24,060 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:24,060 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:24,060 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:24,060 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6364f931f05645ff93ce26494b93b3ae, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d1fb5f03a65b4772b1a910f41e8a3874, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/dd9ad4da8ee4495e89bcb5efcc81dbc2] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=36.8 K 2024-11-21T00:28:24,060 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdeba79bca6b4ffcb6aae12fec202127, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732148901124 2024-11-21T00:28:24,061 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 6364f931f05645ff93ce26494b93b3ae, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732148901124 2024-11-21T00:28:24,062 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 571f6f7c4cd1415bbb8f906de590c805, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1732148901202 2024-11-21T00:28:24,062 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting d1fb5f03a65b4772b1a910f41e8a3874, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1732148901202 2024-11-21T00:28:24,063 
DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9af23cabe71f4582b72af65fc9e0b0d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732148902357 2024-11-21T00:28:24,065 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting dd9ad4da8ee4495e89bcb5efcc81dbc2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732148902357 2024-11-21T00:28:24,116 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:24,121 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#268 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:24,121 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/5099c689e3624e00862e40cca2d042d2 is 50, key is test_row_0/A:col10/1732148902669/Put/seqid=0 2024-11-21T00:28:24,143 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#267 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:24,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-21T00:28:24,144 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/d9832f76dc9942ad88dd07fa2813e0c7 is 50, key is test_row_0/C:col10/1732148902669/Put/seqid=0 2024-11-21T00:28:24,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:24,148 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-21T00:28:24,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:24,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:24,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:24,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:24,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:24,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:24,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742138_1314 (size=13221) 2024-11-21T00:28:24,188 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/5099c689e3624e00862e40cca2d042d2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5099c689e3624e00862e40cca2d042d2 2024-11-21T00:28:24,202 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into 5099c689e3624e00862e40cca2d042d2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:24,202 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:24,202 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=13, startTime=1732148904056; duration=0sec 2024-11-21T00:28:24,202 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:24,202 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:24,202 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:24,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/1a363d046fec44a699106b69d37b5844 is 50, key is test_row_0/A:col10/1732148902736/Put/seqid=0 2024-11-21T00:28:24,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742139_1315 (size=13221) 2024-11-21T00:28:24,206 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:24,206 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction (all files) 2024-11-21T00:28:24,206 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:24,206 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8872eaff8d242b8974677e6cc9b44c8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/90c105c8c5ed4e81b0ea3b7d05c477ca, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c4f6f04100a1425aa587c9b20605098c] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=36.8 K 2024-11-21T00:28:24,207 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8872eaff8d242b8974677e6cc9b44c8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732148901124 2024-11-21T00:28:24,207 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90c105c8c5ed4e81b0ea3b7d05c477ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1732148901202 2024-11-21T00:28:24,207 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4f6f04100a1425aa587c9b20605098c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732148902357 2024-11-21T00:28:24,232 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#270 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:24,233 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/f0946759d62b4efaab7d133cca9366bd is 50, key is test_row_0/B:col10/1732148902669/Put/seqid=0 2024-11-21T00:28:24,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742140_1316 (size=12301) 2024-11-21T00:28:24,252 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=401 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/1a363d046fec44a699106b69d37b5844 2024-11-21T00:28:24,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-21T00:28:24,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742141_1317 (size=13221) 2024-11-21T00:28:24,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/e6af6a09d2c744e086bad2761a65e345 is 50, key is test_row_0/B:col10/1732148902736/Put/seqid=0 2024-11-21T00:28:24,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742142_1318 (size=12301) 2024-11-21T00:28:24,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:24,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:24,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:24,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148964546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:24,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:24,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43262 deadline: 1732148964548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:24,551 DEBUG [Thread-1080 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8168 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:24,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:24,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148964550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:24,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:24,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43228 deadline: 1732148964561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:24,564 DEBUG [Thread-1088 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:24,615 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/d9832f76dc9942ad88dd07fa2813e0c7 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d9832f76dc9942ad88dd07fa2813e0c7 2024-11-21T00:28:24,622 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into d9832f76dc9942ad88dd07fa2813e0c7(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:24,622 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:24,622 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=13, startTime=1732148904056; duration=0sec 2024-11-21T00:28:24,622 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:24,622 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:24,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148964655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:24,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148964655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:24,696 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/f0946759d62b4efaab7d133cca9366bd as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f0946759d62b4efaab7d133cca9366bd 2024-11-21T00:28:24,703 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into f0946759d62b4efaab7d133cca9366bd(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:24,703 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:24,703 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=13, startTime=1732148904056; duration=0sec 2024-11-21T00:28:24,703 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:24,703 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:24,734 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=401 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/e6af6a09d2c744e086bad2761a65e345 2024-11-21T00:28:24,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/423d3016bd4647689875fa2b40944358 is 50, key is test_row_0/C:col10/1732148902736/Put/seqid=0 2024-11-21T00:28:24,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742143_1319 (size=12301) 2024-11-21T00:28:24,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:24,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148964863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:24,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:24,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148964864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:24,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:24,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148964879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:25,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:25,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148965172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:25,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:25,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148965182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:25,202 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=401 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/423d3016bd4647689875fa2b40944358 2024-11-21T00:28:25,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/1a363d046fec44a699106b69d37b5844 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1a363d046fec44a699106b69d37b5844 2024-11-21T00:28:25,229 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1a363d046fec44a699106b69d37b5844, entries=150, sequenceid=401, filesize=12.0 K 2024-11-21T00:28:25,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/e6af6a09d2c744e086bad2761a65e345 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e6af6a09d2c744e086bad2761a65e345 2024-11-21T00:28:25,241 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e6af6a09d2c744e086bad2761a65e345, entries=150, sequenceid=401, filesize=12.0 K 2024-11-21T00:28:25,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/423d3016bd4647689875fa2b40944358 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/423d3016bd4647689875fa2b40944358 2024-11-21T00:28:25,259 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/423d3016bd4647689875fa2b40944358, entries=150, sequenceid=401, filesize=12.0 K 2024-11-21T00:28:25,260 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 1847e2b1a8a082929629d13ff179eb01 in 1113ms, sequenceid=401, compaction requested=false 2024-11-21T00:28:25,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:25,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:25,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-21T00:28:25,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-21T00:28:25,264 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-21T00:28:25,264 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0810 sec 2024-11-21T00:28:25,267 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 2.0880 sec 2024-11-21T00:28:25,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-21T00:28:25,284 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-21T00:28:25,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:25,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-21T00:28:25,287 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:25,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-21T00:28:25,287 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:25,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:25,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-21T00:28:25,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:25,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-21T00:28:25,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:25,441 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:28:25,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:25,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:25,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:25,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:25,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:25,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:25,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/06829ee308524f1db746b308769074b6 is 50, key is test_row_0/A:col10/1732148904543/Put/seqid=0 2024-11-21T00:28:25,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742144_1320 (size=12301) 2024-11-21T00:28:25,500 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/06829ee308524f1db746b308769074b6 2024-11-21T00:28:25,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/76e3747117504c2da182c7795cab6579 is 50, key is test_row_0/B:col10/1732148904543/Put/seqid=0 2024-11-21T00:28:25,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742145_1321 (size=12301) 2024-11-21T00:28:25,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-21T00:28:25,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:25,679 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:25,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:25,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148965729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:25,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:25,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148965730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:25,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148965833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:25,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:25,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148965836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:25,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-21T00:28:25,947 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/76e3747117504c2da182c7795cab6579 2024-11-21T00:28:25,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/e4733b6967524891948d396749595c86 is 50, key is test_row_0/C:col10/1732148904543/Put/seqid=0 2024-11-21T00:28:25,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742146_1322 (size=12301) 2024-11-21T00:28:25,982 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/e4733b6967524891948d396749595c86 2024-11-21T00:28:25,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/06829ee308524f1db746b308769074b6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/06829ee308524f1db746b308769074b6 2024-11-21T00:28:25,994 INFO 
[RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/06829ee308524f1db746b308769074b6, entries=150, sequenceid=416, filesize=12.0 K 2024-11-21T00:28:25,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/76e3747117504c2da182c7795cab6579 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/76e3747117504c2da182c7795cab6579 2024-11-21T00:28:26,005 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/76e3747117504c2da182c7795cab6579, entries=150, sequenceid=416, filesize=12.0 K 2024-11-21T00:28:26,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/e4733b6967524891948d396749595c86 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/e4733b6967524891948d396749595c86 2024-11-21T00:28:26,018 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/e4733b6967524891948d396749595c86, entries=150, sequenceid=416, filesize=12.0 K 2024-11-21T00:28:26,020 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1847e2b1a8a082929629d13ff179eb01 in 579ms, sequenceid=416, compaction requested=true 2024-11-21T00:28:26,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:26,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:26,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-21T00:28:26,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-21T00:28:26,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-21T00:28:26,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 735 msec 2024-11-21T00:28:26,032 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 742 msec 2024-11-21T00:28:26,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:26,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:28:26,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:26,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:26,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:26,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:26,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:26,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:26,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/689b65cf7ba84909bdd90d79c687b0d3 is 50, key is test_row_0/A:col10/1732148905728/Put/seqid=0 2024-11-21T00:28:26,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148966082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:26,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148966084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742147_1323 (size=14741) 2024-11-21T00:28:26,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:26,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148966189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:26,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148966197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-21T00:28:26,392 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-21T00:28:26,393 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:26,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-21T00:28:26,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-21T00:28:26,395 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:26,395 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:26,395 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:26,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:26,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148966393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:26,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148966404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-21T00:28:26,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/689b65cf7ba84909bdd90d79c687b0d3 2024-11-21T00:28:26,547 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-21T00:28:26,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:26,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:26,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:26,548 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:26,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:26,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:26,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/f8681bd3291a420bb5fa10e7c455a82f is 50, key is test_row_0/B:col10/1732148905728/Put/seqid=0 2024-11-21T00:28:26,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742148_1324 (size=12301) 2024-11-21T00:28:26,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/f8681bd3291a420bb5fa10e7c455a82f 2024-11-21T00:28:26,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/b0bb4813982d426288c81d9241288bcf is 50, key is test_row_0/C:col10/1732148905728/Put/seqid=0 2024-11-21T00:28:26,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742149_1325 (size=12301) 2024-11-21T00:28:26,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/b0bb4813982d426288c81d9241288bcf 2024-11-21T00:28:26,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/689b65cf7ba84909bdd90d79c687b0d3 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/689b65cf7ba84909bdd90d79c687b0d3 2024-11-21T00:28:26,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/689b65cf7ba84909bdd90d79c687b0d3, entries=200, sequenceid=441, filesize=14.4 K 2024-11-21T00:28:26,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/f8681bd3291a420bb5fa10e7c455a82f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8681bd3291a420bb5fa10e7c455a82f 2024-11-21T00:28:26,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-21T00:28:26,700 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-21T00:28:26,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8681bd3291a420bb5fa10e7c455a82f, entries=150, sequenceid=441, filesize=12.0 K 2024-11-21T00:28:26,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:26,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:26,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:26,701 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:26,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:26,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:26,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/b0bb4813982d426288c81d9241288bcf as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/b0bb4813982d426288c81d9241288bcf 2024-11-21T00:28:26,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:26,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148966700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:26,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/b0bb4813982d426288c81d9241288bcf, entries=150, sequenceid=441, filesize=12.0 K 2024-11-21T00:28:26,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148966708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 1847e2b1a8a082929629d13ff179eb01 in 671ms, sequenceid=441, compaction requested=true 2024-11-21T00:28:26,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:26,714 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:26,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:26,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:26,715 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:26,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:26,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:26,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:26,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:26,720 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:26,720 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction (all files) 2024-11-21T00:28:26,720 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:26,720 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f0946759d62b4efaab7d133cca9366bd, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e6af6a09d2c744e086bad2761a65e345, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/76e3747117504c2da182c7795cab6579, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8681bd3291a420bb5fa10e7c455a82f] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=48.9 K 2024-11-21T00:28:26,721 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52564 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:26,721 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:26,721 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:26,721 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5099c689e3624e00862e40cca2d042d2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1a363d046fec44a699106b69d37b5844, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/06829ee308524f1db746b308769074b6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/689b65cf7ba84909bdd90d79c687b0d3] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=51.3 K 2024-11-21T00:28:26,722 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f0946759d62b4efaab7d133cca9366bd, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732148902357 2024-11-21T00:28:26,723 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5099c689e3624e00862e40cca2d042d2, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732148902357 2024-11-21T00:28:26,723 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e6af6a09d2c744e086bad2761a65e345, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=401, earliestPutTs=1732148902722 2024-11-21T00:28:26,723 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a363d046fec44a699106b69d37b5844, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=401, earliestPutTs=1732148902722 2024-11-21T00:28:26,723 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 76e3747117504c2da182c7795cab6579, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732148904543 2024-11-21T00:28:26,724 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f8681bd3291a420bb5fa10e7c455a82f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732148905723 2024-11-21T00:28:26,724 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06829ee308524f1db746b308769074b6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732148904543 2024-11-21T00:28:26,725 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 689b65cf7ba84909bdd90d79c687b0d3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732148905723 2024-11-21T00:28:26,757 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#279 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:26,758 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/03b7239f509841d1b431b96367ec0492 is 50, key is test_row_0/A:col10/1732148905728/Put/seqid=0 2024-11-21T00:28:26,769 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#280 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:26,770 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/64e80aaf45ad49b6a6195410627ba15e is 50, key is test_row_0/B:col10/1732148905728/Put/seqid=0 2024-11-21T00:28:26,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742150_1326 (size=13357) 2024-11-21T00:28:26,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742151_1327 (size=13357) 2024-11-21T00:28:26,837 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/64e80aaf45ad49b6a6195410627ba15e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/64e80aaf45ad49b6a6195410627ba15e 2024-11-21T00:28:26,843 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into 64e80aaf45ad49b6a6195410627ba15e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:26,843 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:26,843 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=12, startTime=1732148906715; duration=0sec 2024-11-21T00:28:26,843 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:26,843 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:26,843 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:26,845 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:26,845 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:26,845 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:26,845 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d9832f76dc9942ad88dd07fa2813e0c7, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/423d3016bd4647689875fa2b40944358, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/e4733b6967524891948d396749595c86, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/b0bb4813982d426288c81d9241288bcf] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=48.9 K 2024-11-21T00:28:26,846 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting d9832f76dc9942ad88dd07fa2813e0c7, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732148902357 2024-11-21T00:28:26,846 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 423d3016bd4647689875fa2b40944358, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=401, earliestPutTs=1732148902722 2024-11-21T00:28:26,847 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e4733b6967524891948d396749595c86, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=416, earliestPutTs=1732148904543 2024-11-21T00:28:26,847 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b0bb4813982d426288c81d9241288bcf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732148905723 2024-11-21T00:28:26,856 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:26,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-21T00:28:26,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:26,859 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:28:26,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:26,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:26,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:26,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:26,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:26,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:26,864 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#281 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:26,865 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/034802cd4e6f42dd8ad7c5de72acb3f9 is 50, key is test_row_0/C:col10/1732148905728/Put/seqid=0 2024-11-21T00:28:26,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/e2a82c384047439e82c760592aa6a81d is 50, key is test_row_0/A:col10/1732148906080/Put/seqid=0 2024-11-21T00:28:26,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:26,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:26,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742152_1328 (size=13357) 2024-11-21T00:28:26,950 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/034802cd4e6f42dd8ad7c5de72acb3f9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/034802cd4e6f42dd8ad7c5de72acb3f9 2024-11-21T00:28:26,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742153_1329 (size=12301) 2024-11-21T00:28:26,958 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/e2a82c384047439e82c760592aa6a81d 2024-11-21T00:28:26,959 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into 034802cd4e6f42dd8ad7c5de72acb3f9(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:26,959 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:26,959 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=12, startTime=1732148906715; duration=0sec 2024-11-21T00:28:26,959 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:26,959 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:26,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/fafe1d7342ad4726a6e3eb526247b736 is 50, key is test_row_0/B:col10/1732148906080/Put/seqid=0 2024-11-21T00:28:26,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-21T00:28:27,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742154_1330 (size=12301) 2024-11-21T00:28:27,043 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/fafe1d7342ad4726a6e3eb526247b736 2024-11-21T00:28:27,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:27,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148967056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:27,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/0f9dbe90d317400e873fe0c8f4d65f7c is 50, key is test_row_0/C:col10/1732148906080/Put/seqid=0 2024-11-21T00:28:27,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742155_1331 (size=12301) 2024-11-21T00:28:27,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:27,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148967160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:27,213 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/03b7239f509841d1b431b96367ec0492 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/03b7239f509841d1b431b96367ec0492 2024-11-21T00:28:27,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:27,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148967215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:27,220 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into 03b7239f509841d1b431b96367ec0492(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:27,220 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:27,220 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=12, startTime=1732148906714; duration=0sec 2024-11-21T00:28:27,220 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:27,220 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:27,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:27,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148967219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:27,298 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:28:27,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:27,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148967367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:27,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-21T00:28:27,532 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/0f9dbe90d317400e873fe0c8f4d65f7c 2024-11-21T00:28:27,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/e2a82c384047439e82c760592aa6a81d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/e2a82c384047439e82c760592aa6a81d 2024-11-21T00:28:27,544 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/e2a82c384047439e82c760592aa6a81d, entries=150, sequenceid=454, filesize=12.0 K 2024-11-21T00:28:27,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/fafe1d7342ad4726a6e3eb526247b736 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fafe1d7342ad4726a6e3eb526247b736 2024-11-21T00:28:27,549 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fafe1d7342ad4726a6e3eb526247b736, entries=150, sequenceid=454, filesize=12.0 K 2024-11-21T00:28:27,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/0f9dbe90d317400e873fe0c8f4d65f7c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/0f9dbe90d317400e873fe0c8f4d65f7c 2024-11-21T00:28:27,554 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/0f9dbe90d317400e873fe0c8f4d65f7c, entries=150, sequenceid=454, filesize=12.0 K 2024-11-21T00:28:27,555 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1847e2b1a8a082929629d13ff179eb01 in 695ms, sequenceid=454, compaction requested=false 2024-11-21T00:28:27,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:27,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:27,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-21T00:28:27,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-21T00:28:27,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-21T00:28:27,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1620 sec 2024-11-21T00:28:27,561 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.1670 sec 2024-11-21T00:28:27,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:27,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:28:27,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:27,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:27,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:27,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:27,673 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:27,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:27,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/04d2426b1f304cd0b5b8a00335e409aa is 50, key is test_row_0/A:col10/1732148907054/Put/seqid=0 2024-11-21T00:28:27,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148967703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:27,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742156_1332 (size=12301) 2024-11-21T00:28:27,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:27,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148967808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:28,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:28,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148968011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:28,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/04d2426b1f304cd0b5b8a00335e409aa 2024-11-21T00:28:28,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/f7bbb751e60e47639a73b16790657f8f is 50, key is test_row_0/B:col10/1732148907054/Put/seqid=0 2024-11-21T00:28:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742157_1333 (size=12301) 2024-11-21T00:28:28,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/f7bbb751e60e47639a73b16790657f8f 2024-11-21T00:28:28,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/a5bb2bfe3d43491f90e6558500c489bb is 50, key is test_row_0/C:col10/1732148907054/Put/seqid=0 2024-11-21T00:28:28,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:28,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43246 deadline: 1732148968225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:28,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:28,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43274 deadline: 1732148968228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:28,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742158_1334 (size=12301) 2024-11-21T00:28:28,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/a5bb2bfe3d43491f90e6558500c489bb 2024-11-21T00:28:28,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/04d2426b1f304cd0b5b8a00335e409aa as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/04d2426b1f304cd0b5b8a00335e409aa 2024-11-21T00:28:28,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/04d2426b1f304cd0b5b8a00335e409aa, entries=150, sequenceid=481, filesize=12.0 K 2024-11-21T00:28:28,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/f7bbb751e60e47639a73b16790657f8f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f7bbb751e60e47639a73b16790657f8f 2024-11-21T00:28:28,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f7bbb751e60e47639a73b16790657f8f, entries=150, sequenceid=481, filesize=12.0 K 2024-11-21T00:28:28,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/a5bb2bfe3d43491f90e6558500c489bb as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/a5bb2bfe3d43491f90e6558500c489bb 2024-11-21T00:28:28,287 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/a5bb2bfe3d43491f90e6558500c489bb, entries=150, sequenceid=481, filesize=12.0 K 2024-11-21T00:28:28,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 1847e2b1a8a082929629d13ff179eb01 in 617ms, sequenceid=481, compaction requested=true 2024-11-21T00:28:28,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:28,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:28,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:28,289 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:28,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:28,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:28,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:28,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:28:28,289 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:28,290 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:28,290 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:28,290 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:28,290 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/03b7239f509841d1b431b96367ec0492, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/e2a82c384047439e82c760592aa6a81d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/04d2426b1f304cd0b5b8a00335e409aa] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=37.1 K 2024-11-21T00:28:28,291 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:28,291 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction (all files) 2024-11-21T00:28:28,291 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03b7239f509841d1b431b96367ec0492, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732148905723 2024-11-21T00:28:28,291 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:28,291 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/64e80aaf45ad49b6a6195410627ba15e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fafe1d7342ad4726a6e3eb526247b736, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f7bbb751e60e47639a73b16790657f8f] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=37.1 K 2024-11-21T00:28:28,291 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 64e80aaf45ad49b6a6195410627ba15e, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732148905723 2024-11-21T00:28:28,292 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting fafe1d7342ad4726a6e3eb526247b736, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732148906048 2024-11-21T00:28:28,292 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2a82c384047439e82c760592aa6a81d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732148906048 2024-11-21T00:28:28,293 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04d2426b1f304cd0b5b8a00335e409aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732148907050 2024-11-21T00:28:28,293 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f7bbb751e60e47639a73b16790657f8f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732148907050 2024-11-21T00:28:28,318 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#288 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:28,319 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/670c8de2543f4cb190c2e7ddf5bada4e is 50, key is test_row_0/A:col10/1732148907054/Put/seqid=0 2024-11-21T00:28:28,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:28:28,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:28,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:28,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:28,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:28,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:28,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:28,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:28,326 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#289 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:28,327 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/1da591fb913c44248faeb904dca9f8ae is 50, key is test_row_0/B:col10/1732148907054/Put/seqid=0 2024-11-21T00:28:28,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/5e5971b78b0e4fb28d09bff715a93823 is 50, key is test_row_0/A:col10/1732148908320/Put/seqid=0 2024-11-21T00:28:28,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742159_1335 (size=13459) 2024-11-21T00:28:28,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742160_1336 (size=13459) 2024-11-21T00:28:28,401 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/670c8de2543f4cb190c2e7ddf5bada4e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/670c8de2543f4cb190c2e7ddf5bada4e 2024-11-21T00:28:28,410 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/1da591fb913c44248faeb904dca9f8ae as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/1da591fb913c44248faeb904dca9f8ae 2024-11-21T00:28:28,414 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into 670c8de2543f4cb190c2e7ddf5bada4e(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:28,414 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:28,414 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=13, startTime=1732148908289; duration=0sec 2024-11-21T00:28:28,414 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:28,414 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:28,414 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:28,416 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:28,416 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:28,416 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:28,417 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/034802cd4e6f42dd8ad7c5de72acb3f9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/0f9dbe90d317400e873fe0c8f4d65f7c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/a5bb2bfe3d43491f90e6558500c489bb] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=37.1 K 2024-11-21T00:28:28,418 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 034802cd4e6f42dd8ad7c5de72acb3f9, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732148905723 2024-11-21T00:28:28,419 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f9dbe90d317400e873fe0c8f4d65f7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732148906048 2024-11-21T00:28:28,420 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5bb2bfe3d43491f90e6558500c489bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732148907050 2024-11-21T00:28:28,420 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into 1da591fb913c44248faeb904dca9f8ae(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:28,420 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:28,420 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=13, startTime=1732148908289; duration=0sec 2024-11-21T00:28:28,421 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:28,421 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:28,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742161_1337 (size=12301) 2024-11-21T00:28:28,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/5e5971b78b0e4fb28d09bff715a93823 2024-11-21T00:28:28,481 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#291 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:28,482 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/c596bb2fb3bf4d86a25613eb7151d8b3 is 50, key is test_row_0/C:col10/1732148907054/Put/seqid=0 2024-11-21T00:28:28,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/778cb752faed4cb382d5e18811bd2db8 is 50, key is test_row_0/B:col10/1732148908320/Put/seqid=0 2024-11-21T00:28:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-21T00:28:28,498 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-21T00:28:28,499 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:28,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-21T00:28:28,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:28,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-21T00:28:28,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148968498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:28,502 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:28,503 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:28,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:28,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742162_1338 (size=13459) 2024-11-21T00:28:28,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742163_1339 (size=12301) 2024-11-21T00:28:28,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-21T00:28:28,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:28,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148968602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:28,608 DEBUG [Thread-1093 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x712d7bc3 to 127.0.0.1:64241 2024-11-21T00:28:28,608 DEBUG [Thread-1093 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:28,609 DEBUG [Thread-1099 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3ff3c1a9 to 127.0.0.1:64241 2024-11-21T00:28:28,609 DEBUG [Thread-1099 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:28,610 DEBUG [Thread-1095 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40da73c1 to 127.0.0.1:64241 2024-11-21T00:28:28,610 DEBUG [Thread-1095 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:28,611 DEBUG [Thread-1097 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3be398a9 to 127.0.0.1:64241 2024-11-21T00:28:28,611 DEBUG [Thread-1097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:28,611 DEBUG [Thread-1091 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x124edab0 to 127.0.0.1:64241 2024-11-21T00:28:28,611 DEBUG [Thread-1091 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:28,666 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:28,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-21T00:28:28,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:28,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:28,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
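[Editor's note] The RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources(), which rejects writes once a region's memstore exceeds the configured flush size multiplied by the blocking multiplier. The excerpt does not show the values this test actually configures, so the sketch below uses hypothetical numbers purely to illustrate how a 512 KB blocking limit could arise; the class name is made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    // Illustrative values only: the excerpt does not show what TestAcidGuarantees configures.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush a region store at ~128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4 x 128 KB = 512 KB
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("Blocking memstore size = " + blocking + " bytes");
  }
}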
2024-11-21T00:28:28,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:28,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:28,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:28,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-21T00:28:28,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:28,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43264 deadline: 1732148968805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:28,818 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:28,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-21T00:28:28,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:28,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:28,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:28,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
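[Editor's note] Client mutations rejected while the region is over its memstore limit, like the Mutate calls with callId 302/304/306 above, are retryable and are normally retried by the HBase client itself. The sketch below is only a simplified, hand-rolled illustration of that retry-with-backoff behaviour; the row, family, and qualifier (test_row_0, A, col10) are taken from the log keys, the value and backoff numbers are arbitrary, and the class name is made up for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100; // arbitrary starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // may be rejected while the memstore is over its blocking limit
          break;
        } catch (IOException e) {
          // Depending on client retry settings the busy condition can surface directly as a
          // RegionTooBusyException or wrapped in a retries-exhausted exception; back off and retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}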
2024-11-21T00:28:28,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:28,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:28,932 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/c596bb2fb3bf4d86a25613eb7151d8b3 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c596bb2fb3bf4d86a25613eb7151d8b3 2024-11-21T00:28:28,937 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into c596bb2fb3bf4d86a25613eb7151d8b3(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:28,937 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:28,937 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=13, startTime=1732148908289; duration=0sec 2024-11-21T00:28:28,937 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:28,937 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:28,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/778cb752faed4cb382d5e18811bd2db8 2024-11-21T00:28:28,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/22347f7d325c48cd851d5527f0bf0716 is 50, key is test_row_0/C:col10/1732148908320/Put/seqid=0 2024-11-21T00:28:28,970 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:28,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-21T00:28:28,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742164_1340 (size=12301) 2024-11-21T00:28:28,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=493 (bloomFilter=true), 
to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/22347f7d325c48cd851d5527f0bf0716 2024-11-21T00:28:28,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:28,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:28,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:28,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:28,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:28,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:28,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/5e5971b78b0e4fb28d09bff715a93823 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5e5971b78b0e4fb28d09bff715a93823 2024-11-21T00:28:28,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5e5971b78b0e4fb28d09bff715a93823, entries=150, sequenceid=493, filesize=12.0 K 2024-11-21T00:28:28,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/778cb752faed4cb382d5e18811bd2db8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/778cb752faed4cb382d5e18811bd2db8 2024-11-21T00:28:28,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/778cb752faed4cb382d5e18811bd2db8, entries=150, sequenceid=493, filesize=12.0 K 2024-11-21T00:28:28,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/22347f7d325c48cd851d5527f0bf0716 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/22347f7d325c48cd851d5527f0bf0716 2024-11-21T00:28:29,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/22347f7d325c48cd851d5527f0bf0716, entries=150, sequenceid=493, filesize=12.0 K 2024-11-21T00:28:29,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1847e2b1a8a082929629d13ff179eb01 in 681ms, sequenceid=493, compaction requested=false 2024-11-21T00:28:29,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:29,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-21T00:28:29,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:29,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:28:29,109 DEBUG [Thread-1084 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x282318cf to 127.0.0.1:64241 2024-11-21T00:28:29,109 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:29,109 DEBUG [Thread-1084 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:29,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:29,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:29,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:29,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:29,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:29,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/a83a4df1bf824315aceb33ec2903aa5f is 50, key is test_row_0/A:col10/1732148909107/Put/seqid=0 2024-11-21T00:28:29,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742165_1341 (size=12301) 2024-11-21T00:28:29,128 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:29,129 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-21T00:28:29,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:29,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:29,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:29,129 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:29,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:29,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:29,282 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:29,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-21T00:28:29,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:29,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:29,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:29,282 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:29,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:29,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:29,434 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:29,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-21T00:28:29,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:29,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:29,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:29,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:29,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:29,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
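The entries above show pid=90 (the per-region FlushRegionCallable spawned by FlushTableProcedure pid=89) being re-dispatched while the region server answers "NOT flushing ... as already flushing"; each attempt comes back to the master as the same IOException until the in-flight MemStoreFlusher flush completes. As a minimal client-side sketch of the call that drives such a procedure, assuming a reachable cluster with default configuration (only the table name is taken from the log; the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();      // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush of the table on the master, which fans out
          // per-region flush procedures to the region servers (pid=89/90 above).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

In this run the master reports the parent flush procedure (pid=89) finished roughly 1.25 s after submission; see the "Finished pid=89" entry further below.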
2024-11-21T00:28:29,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/a83a4df1bf824315aceb33ec2903aa5f 2024-11-21T00:28:29,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/7ca177f7a9c24117ae4a03060c0b8154 is 50, key is test_row_0/B:col10/1732148909107/Put/seqid=0 2024-11-21T00:28:29,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742166_1342 (size=12301) 2024-11-21T00:28:29,546 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/7ca177f7a9c24117ae4a03060c0b8154 2024-11-21T00:28:29,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/bf4baad4d611417c997fb58d35722192 is 50, key is test_row_0/C:col10/1732148909107/Put/seqid=0 2024-11-21T00:28:29,588 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:29,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-21T00:28:29,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:29,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. as already flushing 2024-11-21T00:28:29,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:29,588 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:29,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:29,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:29,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742167_1343 (size=12301) 2024-11-21T00:28:29,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/bf4baad4d611417c997fb58d35722192 2024-11-21T00:28:29,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-21T00:28:29,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/a83a4df1bf824315aceb33ec2903aa5f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a83a4df1bf824315aceb33ec2903aa5f 2024-11-21T00:28:29,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a83a4df1bf824315aceb33ec2903aa5f, entries=150, sequenceid=521, filesize=12.0 K 2024-11-21T00:28:29,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/7ca177f7a9c24117ae4a03060c0b8154 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7ca177f7a9c24117ae4a03060c0b8154 2024-11-21T00:28:29,624 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7ca177f7a9c24117ae4a03060c0b8154, entries=150, sequenceid=521, filesize=12.0 K 2024-11-21T00:28:29,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/bf4baad4d611417c997fb58d35722192 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/bf4baad4d611417c997fb58d35722192 2024-11-21T00:28:29,632 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/bf4baad4d611417c997fb58d35722192, entries=150, sequenceid=521, filesize=12.0 K 2024-11-21T00:28:29,633 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 1847e2b1a8a082929629d13ff179eb01 in 524ms, sequenceid=521, compaction requested=true 2024-11-21T00:28:29,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:29,634 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:29,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:29,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:29,634 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:29,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:29,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:29,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1847e2b1a8a082929629d13ff179eb01:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:29,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:29,635 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm 
has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:29,635 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/B is initiating minor compaction (all files) 2024-11-21T00:28:29,635 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/B in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:29,636 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/1da591fb913c44248faeb904dca9f8ae, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/778cb752faed4cb382d5e18811bd2db8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7ca177f7a9c24117ae4a03060c0b8154] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=37.2 K 2024-11-21T00:28:29,636 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:29,636 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/A is initiating minor compaction (all files) 2024-11-21T00:28:29,636 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/A in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:29,636 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/670c8de2543f4cb190c2e7ddf5bada4e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5e5971b78b0e4fb28d09bff715a93823, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a83a4df1bf824315aceb33ec2903aa5f] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=37.2 K 2024-11-21T00:28:29,636 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1da591fb913c44248faeb904dca9f8ae, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732148907050 2024-11-21T00:28:29,636 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 670c8de2543f4cb190c2e7ddf5bada4e, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732148907050 2024-11-21T00:28:29,637 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e5971b78b0e4fb28d09bff715a93823, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732148907692 2024-11-21T00:28:29,638 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 778cb752faed4cb382d5e18811bd2db8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732148907692 2024-11-21T00:28:29,638 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting a83a4df1bf824315aceb33ec2903aa5f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1732148908491 2024-11-21T00:28:29,638 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ca177f7a9c24117ae4a03060c0b8154, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1732148908491 2024-11-21T00:28:29,659 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#B#compaction#297 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:29,660 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/498973cbbd0b496fbe65b0f39e477d37 is 50, key is test_row_0/B:col10/1732148909107/Put/seqid=0 2024-11-21T00:28:29,660 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#A#compaction#298 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:29,661 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/583b873251b347f8a3120919589b88c2 is 50, key is test_row_0/A:col10/1732148909107/Put/seqid=0 2024-11-21T00:28:29,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742169_1345 (size=13561) 2024-11-21T00:28:29,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742168_1344 (size=13561) 2024-11-21T00:28:29,741 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:29,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-21T00:28:29,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:29,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:29,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:29,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-21T00:28:29,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-21T00:28:29,746 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-21T00:28:29,746 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2410 sec 2024-11-21T00:28:29,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.2470 sec 2024-11-21T00:28:30,084 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/583b873251b347f8a3120919589b88c2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/583b873251b347f8a3120919589b88c2 2024-11-21T00:28:30,089 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/A of 1847e2b1a8a082929629d13ff179eb01 into 583b873251b347f8a3120919589b88c2(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:30,089 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:30,089 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/A, priority=13, startTime=1732148909633; duration=0sec 2024-11-21T00:28:30,089 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:30,089 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:A 2024-11-21T00:28:30,089 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:30,090 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:30,090 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 1847e2b1a8a082929629d13ff179eb01/C is initiating minor compaction (all files) 2024-11-21T00:28:30,090 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1847e2b1a8a082929629d13ff179eb01/C in TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
2024-11-21T00:28:30,090 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c596bb2fb3bf4d86a25613eb7151d8b3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/22347f7d325c48cd851d5527f0bf0716, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/bf4baad4d611417c997fb58d35722192] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp, totalSize=37.2 K 2024-11-21T00:28:30,091 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c596bb2fb3bf4d86a25613eb7151d8b3, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1732148907050 2024-11-21T00:28:30,091 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22347f7d325c48cd851d5527f0bf0716, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732148907692 2024-11-21T00:28:30,091 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf4baad4d611417c997fb58d35722192, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1732148908491 2024-11-21T00:28:30,102 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1847e2b1a8a082929629d13ff179eb01#C#compaction#299 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:30,103 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/d807e1a9e0d5416e97c2a6ab07b71a93 is 50, key is test_row_0/C:col10/1732148909107/Put/seqid=0 2024-11-21T00:28:30,108 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/498973cbbd0b496fbe65b0f39e477d37 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/498973cbbd0b496fbe65b0f39e477d37 2024-11-21T00:28:30,113 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/B of 1847e2b1a8a082929629d13ff179eb01 into 498973cbbd0b496fbe65b0f39e477d37(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
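Both compaction threads above select all three eligible store files (38061 bytes total, logged as 37.2 K) for a minor compaction of each store. The following is a simplified sketch of the size-ratio eligibility idea behind that selection; it is illustrative only, not the actual ExploringCompactionPolicy implementation, whose decision also weighs min/max file counts, off-peak ratios, and permutation scoring:

    import java.util.List;

    public class RatioCheckSketch {
      // Simplified size-ratio test: a candidate set is acceptable if no single
      // file is much larger than the sum of the others (ratio is a tunable knob).
      static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > ratio * (total - size)) {
            return false;   // one file dominates; compacting it now would be wasteful
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Sizes chosen to sum to the 38061 bytes reported in the log
        // (one ~13.1 K file plus two 12301-byte files).
        List<Long> sizes = List.of(13_459L, 12_301L, 12_301L);
        System.out.println(withinRatio(sizes, 1.2));   // true: all three files go into one compaction
      }
    }

With the sizes from this log no single file dominates the candidate set, so each store's three files are rewritten into one ~13.2 K output file, as the "Completed compaction" entries report.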
2024-11-21T00:28:30,113 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:30,113 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/B, priority=13, startTime=1732148909634; duration=0sec 2024-11-21T00:28:30,114 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:30,114 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:B 2024-11-21T00:28:30,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742170_1346 (size=13561) 2024-11-21T00:28:30,250 DEBUG [Thread-1086 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ea91426 to 127.0.0.1:64241 2024-11-21T00:28:30,251 DEBUG [Thread-1086 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:30,254 DEBUG [Thread-1082 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x195206da to 127.0.0.1:64241 2024-11-21T00:28:30,254 DEBUG [Thread-1082 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:30,520 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/d807e1a9e0d5416e97c2a6ab07b71a93 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d807e1a9e0d5416e97c2a6ab07b71a93 2024-11-21T00:28:30,524 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1847e2b1a8a082929629d13ff179eb01/C of 1847e2b1a8a082929629d13ff179eb01 into d807e1a9e0d5416e97c2a6ab07b71a93(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:30,524 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:30,524 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01., storeName=1847e2b1a8a082929629d13ff179eb01/C, priority=13, startTime=1732148909634; duration=0sec 2024-11-21T00:28:30,524 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:30,524 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1847e2b1a8a082929629d13ff179eb01:C 2024-11-21T00:28:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-21T00:28:30,606 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-21T00:28:34,577 DEBUG [Thread-1080 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dc273c3 to 127.0.0.1:64241 2024-11-21T00:28:34,577 DEBUG [Thread-1080 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:34,633 DEBUG [Thread-1088 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x762de37e to 127.0.0.1:64241 2024-11-21T00:28:34,634 DEBUG [Thread-1088 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 36 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 189 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 49 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4532 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4488 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4330 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4509 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4477 2024-11-21T00:28:34,634 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-21T00:28:34,634 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-21T00:28:34,634 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3eec6530 to 127.0.0.1:64241 2024-11-21T00:28:34,634 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:34,635 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-21T00:28:34,636 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-21T00:28:34,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:34,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-21T00:28:34,639 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148914639"}]},"ts":"1732148914639"} 2024-11-21T00:28:34,643 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-21T00:28:34,655 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-21T00:28:34,656 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-21T00:28:34,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1847e2b1a8a082929629d13ff179eb01, UNASSIGN}] 2024-11-21T00:28:34,664 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1847e2b1a8a082929629d13ff179eb01, UNASSIGN 2024-11-21T00:28:34,664 INFO [PEWorker-4 {}] 
assignment.RegionStateStore(202): pid=93 updating hbase:meta row=1847e2b1a8a082929629d13ff179eb01, regionState=CLOSING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:34,665 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T00:28:34,665 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure 1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:28:34,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-21T00:28:34,817 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:34,817 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:34,817 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-21T00:28:34,818 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing 1847e2b1a8a082929629d13ff179eb01, disabling compactions & flushes 2024-11-21T00:28:34,818 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:34,818 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:34,818 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. after waiting 0 ms 2024-11-21T00:28:34,818 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 
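The DisableTableProcedure chain above (pid=91 → CloseTableRegionsProcedure pid=92 → TransitRegionStateProcedure pid=93 → CloseRegionProcedure pid=94) is what the client's disable request expands into on the master. A minimal sketch of that client call, assuming the same cluster and default configuration (class name illustrative; the blocking call is what produces the repeated "Checking to see if procedure is done pid=91" polling seen above and below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();      // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master completes the DisableTableProcedure;
          // the client periodically asks the master whether the procedure is done.
          admin.disableTable(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Before the region can close, the server first flushes its remaining ~26.84 KB of memstore data, which is the flush recorded in the entries that follow.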
2024-11-21T00:28:34,818 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(2837): Flushing 1847e2b1a8a082929629d13ff179eb01 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-21T00:28:34,818 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=A 2024-11-21T00:28:34,818 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:34,818 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=B 2024-11-21T00:28:34,818 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:34,818 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1847e2b1a8a082929629d13ff179eb01, store=C 2024-11-21T00:28:34,818 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:34,822 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/8489a78c3db049539df2f4cca687df62 is 50, key is test_row_0/A:col10/1732148914632/Put/seqid=0 2024-11-21T00:28:34,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742171_1347 (size=12301) 2024-11-21T00:28:34,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-21T00:28:35,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-21T00:28:35,242 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/8489a78c3db049539df2f4cca687df62 2024-11-21T00:28:35,255 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/8563fa695c5840a19ee6e5ae58173708 is 50, key is test_row_0/B:col10/1732148914632/Put/seqid=0 2024-11-21T00:28:35,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742172_1348 (size=12301) 2024-11-21T00:28:35,261 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 
{event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/8563fa695c5840a19ee6e5ae58173708 2024-11-21T00:28:35,268 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/cd6ca4417c084d0b91ac305a3abc98c8 is 50, key is test_row_0/C:col10/1732148914632/Put/seqid=0 2024-11-21T00:28:35,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742173_1349 (size=12301) 2024-11-21T00:28:35,277 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/cd6ca4417c084d0b91ac305a3abc98c8 2024-11-21T00:28:35,282 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/A/8489a78c3db049539df2f4cca687df62 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/8489a78c3db049539df2f4cca687df62 2024-11-21T00:28:35,287 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/8489a78c3db049539df2f4cca687df62, entries=150, sequenceid=532, filesize=12.0 K 2024-11-21T00:28:35,288 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/B/8563fa695c5840a19ee6e5ae58173708 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/8563fa695c5840a19ee6e5ae58173708 2024-11-21T00:28:35,291 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/8563fa695c5840a19ee6e5ae58173708, entries=150, sequenceid=532, filesize=12.0 K 2024-11-21T00:28:35,292 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/.tmp/C/cd6ca4417c084d0b91ac305a3abc98c8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/cd6ca4417c084d0b91ac305a3abc98c8 2024-11-21T00:28:35,294 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/cd6ca4417c084d0b91ac305a3abc98c8, entries=150, sequenceid=532, filesize=12.0 K 2024-11-21T00:28:35,295 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 1847e2b1a8a082929629d13ff179eb01 in 477ms, sequenceid=532, compaction requested=false 2024-11-21T00:28:35,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4bd54ee0ab0a493bace7e46295ddfe82, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/8db393263b2d46928a7a09c3741170ab, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6852d85acb184cd49f875b4cbeaee98a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/17038da050a54864b70963eee81ce44e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/12779cccbbab419d9132f8ad1ff11b90, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/064f44b9dc0f496488c4a0edee4cd2a1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4b7c031355a143858579bc77e9b38de5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d0a90098ddef4c5dbf4085b469d9fe00, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/87aba94518eb46c5aab5c97c4d6d348d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/38c7063c11624321af45e71b4df047e9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5bf0ee6f6ecb4d97a2e99e34df37e78d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a28cf042c3ce437fafce1b08945d3cae, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1249c5adeb1e4f2282f3c476152aba10, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4d62c30868e14196a521a72b9e18367b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ae8a4e36de544445b7423a3e02b11a56, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6316c6f8877048bc93bd8d95f1939e66, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/826f42de597649218db7c66888b83a34, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f8b436ed5c5a49508c86975d58407053, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f84abddbf00245539c73cba9dc173505, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/2aabb07c9aab4f00a6e2637309575b89, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ec37e0bfba92478caefd3dc36b072dc9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/73c19d54a5d34dc2baa579eab958f4ef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d8d87e3cbff4472ab584087175b9eab3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/b31f0ddc4a004dc583b10348306effb0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/fdeba79bca6b4ffcb6aae12fec202127, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/571f6f7c4cd1415bbb8f906de590c805, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5099c689e3624e00862e40cca2d042d2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/9af23cabe71f4582b72af65fc9e0b0d9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1a363d046fec44a699106b69d37b5844, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/06829ee308524f1db746b308769074b6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/689b65cf7ba84909bdd90d79c687b0d3, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/03b7239f509841d1b431b96367ec0492, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/e2a82c384047439e82c760592aa6a81d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/670c8de2543f4cb190c2e7ddf5bada4e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/04d2426b1f304cd0b5b8a00335e409aa, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5e5971b78b0e4fb28d09bff715a93823, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a83a4df1bf824315aceb33ec2903aa5f] to archive 2024-11-21T00:28:35,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:28:35,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4bd54ee0ab0a493bace7e46295ddfe82 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4bd54ee0ab0a493bace7e46295ddfe82 2024-11-21T00:28:35,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/8db393263b2d46928a7a09c3741170ab to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/8db393263b2d46928a7a09c3741170ab 2024-11-21T00:28:35,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6852d85acb184cd49f875b4cbeaee98a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6852d85acb184cd49f875b4cbeaee98a 2024-11-21T00:28:35,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/17038da050a54864b70963eee81ce44e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/17038da050a54864b70963eee81ce44e 2024-11-21T00:28:35,301 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/12779cccbbab419d9132f8ad1ff11b90 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/12779cccbbab419d9132f8ad1ff11b90 2024-11-21T00:28:35,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/064f44b9dc0f496488c4a0edee4cd2a1 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/064f44b9dc0f496488c4a0edee4cd2a1 2024-11-21T00:28:35,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4b7c031355a143858579bc77e9b38de5 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4b7c031355a143858579bc77e9b38de5 2024-11-21T00:28:35,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d0a90098ddef4c5dbf4085b469d9fe00 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d0a90098ddef4c5dbf4085b469d9fe00 2024-11-21T00:28:35,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/87aba94518eb46c5aab5c97c4d6d348d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/87aba94518eb46c5aab5c97c4d6d348d 2024-11-21T00:28:35,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/38c7063c11624321af45e71b4df047e9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/38c7063c11624321af45e71b4df047e9 2024-11-21T00:28:35,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5bf0ee6f6ecb4d97a2e99e34df37e78d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5bf0ee6f6ecb4d97a2e99e34df37e78d 2024-11-21T00:28:35,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a28cf042c3ce437fafce1b08945d3cae to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a28cf042c3ce437fafce1b08945d3cae 2024-11-21T00:28:35,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1249c5adeb1e4f2282f3c476152aba10 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1249c5adeb1e4f2282f3c476152aba10 2024-11-21T00:28:35,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4d62c30868e14196a521a72b9e18367b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/4d62c30868e14196a521a72b9e18367b 2024-11-21T00:28:35,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ae8a4e36de544445b7423a3e02b11a56 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ae8a4e36de544445b7423a3e02b11a56 2024-11-21T00:28:35,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6316c6f8877048bc93bd8d95f1939e66 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/6316c6f8877048bc93bd8d95f1939e66 2024-11-21T00:28:35,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/826f42de597649218db7c66888b83a34 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/826f42de597649218db7c66888b83a34 2024-11-21T00:28:35,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f8b436ed5c5a49508c86975d58407053 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f8b436ed5c5a49508c86975d58407053 2024-11-21T00:28:35,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f84abddbf00245539c73cba9dc173505 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/f84abddbf00245539c73cba9dc173505 2024-11-21T00:28:35,318 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/2aabb07c9aab4f00a6e2637309575b89 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/2aabb07c9aab4f00a6e2637309575b89 2024-11-21T00:28:35,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ec37e0bfba92478caefd3dc36b072dc9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/ec37e0bfba92478caefd3dc36b072dc9 2024-11-21T00:28:35,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/73c19d54a5d34dc2baa579eab958f4ef to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/73c19d54a5d34dc2baa579eab958f4ef 2024-11-21T00:28:35,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d8d87e3cbff4472ab584087175b9eab3 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/d8d87e3cbff4472ab584087175b9eab3 2024-11-21T00:28:35,322 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/b31f0ddc4a004dc583b10348306effb0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/b31f0ddc4a004dc583b10348306effb0 2024-11-21T00:28:35,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/fdeba79bca6b4ffcb6aae12fec202127 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/fdeba79bca6b4ffcb6aae12fec202127 2024-11-21T00:28:35,326 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/571f6f7c4cd1415bbb8f906de590c805 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/571f6f7c4cd1415bbb8f906de590c805 2024-11-21T00:28:35,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5099c689e3624e00862e40cca2d042d2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5099c689e3624e00862e40cca2d042d2 2024-11-21T00:28:35,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/9af23cabe71f4582b72af65fc9e0b0d9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/9af23cabe71f4582b72af65fc9e0b0d9 2024-11-21T00:28:35,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1a363d046fec44a699106b69d37b5844 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/1a363d046fec44a699106b69d37b5844 2024-11-21T00:28:35,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/06829ee308524f1db746b308769074b6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/06829ee308524f1db746b308769074b6 2024-11-21T00:28:35,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/689b65cf7ba84909bdd90d79c687b0d3 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/689b65cf7ba84909bdd90d79c687b0d3 2024-11-21T00:28:35,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/03b7239f509841d1b431b96367ec0492 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/03b7239f509841d1b431b96367ec0492 2024-11-21T00:28:35,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/e2a82c384047439e82c760592aa6a81d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/e2a82c384047439e82c760592aa6a81d 2024-11-21T00:28:35,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/670c8de2543f4cb190c2e7ddf5bada4e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/670c8de2543f4cb190c2e7ddf5bada4e 2024-11-21T00:28:35,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/04d2426b1f304cd0b5b8a00335e409aa to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/04d2426b1f304cd0b5b8a00335e409aa 2024-11-21T00:28:35,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5e5971b78b0e4fb28d09bff715a93823 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/5e5971b78b0e4fb28d09bff715a93823 2024-11-21T00:28:35,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a83a4df1bf824315aceb33ec2903aa5f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/a83a4df1bf824315aceb33ec2903aa5f 2024-11-21T00:28:35,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/349a182e7d3448d4a74a1976e88f3978, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7200f446d67d48a4a6d00bc78653a85e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d519b374db964e63925b8079499ef94d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/825eb0efa0284e0598d551f20c1f81e0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0e248519d57a4ec0941405d742c34f08, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/eed015ad27884773a4cdf29d896aa0e6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/935b5cdce7fd4717b5619fbe366d9871, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/89344ae3ad8740859b3e6c6e7f414387, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5e2c68e1ec67412ca246e5771ef92eb9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c249724ee31a46b7b198c0f7f13022bc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45925116307948dc9b807d48b5a5289d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/8c5578136336493aac8946607e531e11, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0bbc330f9f47424a8a6922d99cfa3966, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/4dbcfc03f3bb465a81996e1f100899c5, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e047e79d202a46be8559e1c618efc2ca, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/116602729c174aa68d11ecc1c3b56d24, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d9d4045e5ffb4ac98acb6a09bebf8c7e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fd3f15bd608744f3af11e95301a325cc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5ad3057cff7d404baac91e4e68e3e070, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/aa760f13b6034882b3e141a1c9b840dd, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45a0bcfb55ff417195cc641c5c6878a4, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/487af1b88b7e4d8d9f7c96f504c9a067, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/03da2f6ad48a400ca9a6901130d5ba72, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8872eaff8d242b8974677e6cc9b44c8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/318fbafdd7984f5eb79b04ef29f57c4d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/90c105c8c5ed4e81b0ea3b7d05c477ca, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f0946759d62b4efaab7d133cca9366bd, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c4f6f04100a1425aa587c9b20605098c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e6af6a09d2c744e086bad2761a65e345, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/76e3747117504c2da182c7795cab6579, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/64e80aaf45ad49b6a6195410627ba15e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8681bd3291a420bb5fa10e7c455a82f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fafe1d7342ad4726a6e3eb526247b736, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/1da591fb913c44248faeb904dca9f8ae, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f7bbb751e60e47639a73b16790657f8f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/778cb752faed4cb382d5e18811bd2db8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7ca177f7a9c24117ae4a03060c0b8154] to archive 2024-11-21T00:28:35,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:28:35,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/349a182e7d3448d4a74a1976e88f3978 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/349a182e7d3448d4a74a1976e88f3978 2024-11-21T00:28:35,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7200f446d67d48a4a6d00bc78653a85e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7200f446d67d48a4a6d00bc78653a85e 2024-11-21T00:28:35,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d519b374db964e63925b8079499ef94d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d519b374db964e63925b8079499ef94d 2024-11-21T00:28:35,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/825eb0efa0284e0598d551f20c1f81e0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/825eb0efa0284e0598d551f20c1f81e0 2024-11-21T00:28:35,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0e248519d57a4ec0941405d742c34f08 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0e248519d57a4ec0941405d742c34f08 2024-11-21T00:28:35,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/eed015ad27884773a4cdf29d896aa0e6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/eed015ad27884773a4cdf29d896aa0e6 2024-11-21T00:28:35,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/935b5cdce7fd4717b5619fbe366d9871 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/935b5cdce7fd4717b5619fbe366d9871 2024-11-21T00:28:35,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/89344ae3ad8740859b3e6c6e7f414387 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/89344ae3ad8740859b3e6c6e7f414387 2024-11-21T00:28:35,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5e2c68e1ec67412ca246e5771ef92eb9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5e2c68e1ec67412ca246e5771ef92eb9 2024-11-21T00:28:35,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c249724ee31a46b7b198c0f7f13022bc to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c249724ee31a46b7b198c0f7f13022bc 2024-11-21T00:28:35,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45925116307948dc9b807d48b5a5289d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45925116307948dc9b807d48b5a5289d 2024-11-21T00:28:35,361 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/8c5578136336493aac8946607e531e11 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/8c5578136336493aac8946607e531e11 2024-11-21T00:28:35,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0bbc330f9f47424a8a6922d99cfa3966 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/0bbc330f9f47424a8a6922d99cfa3966 2024-11-21T00:28:35,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/4dbcfc03f3bb465a81996e1f100899c5 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/4dbcfc03f3bb465a81996e1f100899c5 2024-11-21T00:28:35,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e047e79d202a46be8559e1c618efc2ca to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e047e79d202a46be8559e1c618efc2ca 2024-11-21T00:28:35,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/116602729c174aa68d11ecc1c3b56d24 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/116602729c174aa68d11ecc1c3b56d24 2024-11-21T00:28:35,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d9d4045e5ffb4ac98acb6a09bebf8c7e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/d9d4045e5ffb4ac98acb6a09bebf8c7e 2024-11-21T00:28:35,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fd3f15bd608744f3af11e95301a325cc to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fd3f15bd608744f3af11e95301a325cc 2024-11-21T00:28:35,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5ad3057cff7d404baac91e4e68e3e070 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/5ad3057cff7d404baac91e4e68e3e070 2024-11-21T00:28:35,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/aa760f13b6034882b3e141a1c9b840dd to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/aa760f13b6034882b3e141a1c9b840dd 2024-11-21T00:28:35,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45a0bcfb55ff417195cc641c5c6878a4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/45a0bcfb55ff417195cc641c5c6878a4 2024-11-21T00:28:35,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/487af1b88b7e4d8d9f7c96f504c9a067 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/487af1b88b7e4d8d9f7c96f504c9a067 2024-11-21T00:28:35,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/03da2f6ad48a400ca9a6901130d5ba72 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/03da2f6ad48a400ca9a6901130d5ba72 2024-11-21T00:28:35,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8872eaff8d242b8974677e6cc9b44c8 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8872eaff8d242b8974677e6cc9b44c8 2024-11-21T00:28:35,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/318fbafdd7984f5eb79b04ef29f57c4d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/318fbafdd7984f5eb79b04ef29f57c4d 2024-11-21T00:28:35,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/90c105c8c5ed4e81b0ea3b7d05c477ca to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/90c105c8c5ed4e81b0ea3b7d05c477ca 2024-11-21T00:28:35,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f0946759d62b4efaab7d133cca9366bd to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f0946759d62b4efaab7d133cca9366bd 2024-11-21T00:28:35,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c4f6f04100a1425aa587c9b20605098c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/c4f6f04100a1425aa587c9b20605098c 2024-11-21T00:28:35,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e6af6a09d2c744e086bad2761a65e345 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/e6af6a09d2c744e086bad2761a65e345 2024-11-21T00:28:35,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/76e3747117504c2da182c7795cab6579 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/76e3747117504c2da182c7795cab6579 2024-11-21T00:28:35,383 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/64e80aaf45ad49b6a6195410627ba15e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/64e80aaf45ad49b6a6195410627ba15e 2024-11-21T00:28:35,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8681bd3291a420bb5fa10e7c455a82f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f8681bd3291a420bb5fa10e7c455a82f 2024-11-21T00:28:35,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fafe1d7342ad4726a6e3eb526247b736 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/fafe1d7342ad4726a6e3eb526247b736 2024-11-21T00:28:35,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/1da591fb913c44248faeb904dca9f8ae to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/1da591fb913c44248faeb904dca9f8ae 2024-11-21T00:28:35,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f7bbb751e60e47639a73b16790657f8f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/f7bbb751e60e47639a73b16790657f8f 2024-11-21T00:28:35,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/778cb752faed4cb382d5e18811bd2db8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/778cb752faed4cb382d5e18811bd2db8 2024-11-21T00:28:35,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7ca177f7a9c24117ae4a03060c0b8154 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/7ca177f7a9c24117ae4a03060c0b8154 2024-11-21T00:28:35,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eec87108e7e44b0c887d74154e92462f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7010f11256644e15a9dd9c9d92f97f2f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/db2f092d5ae74d69bade7cac0aec4b13, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/500a7e520d4c4eaf9ceec242ee0a312c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6d89f9c6cf3944dc8e12fe9349ffa414, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f901ec666acf4cfc9eb6c18504c3c00e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/07197113ab724aecb58c91bf690aa2bf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d8553174bef546fabd46f59ff410b652, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f884156eaa2f4c03a054597c88a2f4ed, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/341de1af37ac4ba184021a8f36e6f87d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/05b8864b01a4431e8a0508f14c96a0dc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/9ea86beb52e04293b0c7e78be30824de, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/de77c48747ae42748049445965866ed9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6c3c331c2f7d48eb911b6abab1a7f0bb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/615abd6886434ad5a56bd857d9822a19, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/46e2bd47fb804bfc93abe80664d6412a, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eda84e91db574a3ca1b7644b2e47e0e5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6764e263b8d24f2ab605e095b020d3b9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/57aa7341429a4c6ba9de44babd0e5b99, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/44000524fb624e3aaacb56a7a45b2214, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f2ae4160d26449148d9408363471d438, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c33df9504dad4a298ddf8f6b973010c0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7eecc53573484b87b8c8b361d6f97fef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6364f931f05645ff93ce26494b93b3ae, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/06990b80b1bf444e82aa668792c3ad47, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d1fb5f03a65b4772b1a910f41e8a3874, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d9832f76dc9942ad88dd07fa2813e0c7, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/dd9ad4da8ee4495e89bcb5efcc81dbc2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/423d3016bd4647689875fa2b40944358, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/e4733b6967524891948d396749595c86, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/034802cd4e6f42dd8ad7c5de72acb3f9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/b0bb4813982d426288c81d9241288bcf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/0f9dbe90d317400e873fe0c8f4d65f7c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c596bb2fb3bf4d86a25613eb7151d8b3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/a5bb2bfe3d43491f90e6558500c489bb, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/22347f7d325c48cd851d5527f0bf0716, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/bf4baad4d611417c997fb58d35722192] to archive 2024-11-21T00:28:35,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:28:35,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eec87108e7e44b0c887d74154e92462f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eec87108e7e44b0c887d74154e92462f 2024-11-21T00:28:35,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7010f11256644e15a9dd9c9d92f97f2f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7010f11256644e15a9dd9c9d92f97f2f 2024-11-21T00:28:35,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/db2f092d5ae74d69bade7cac0aec4b13 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/db2f092d5ae74d69bade7cac0aec4b13 2024-11-21T00:28:35,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/500a7e520d4c4eaf9ceec242ee0a312c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/500a7e520d4c4eaf9ceec242ee0a312c 2024-11-21T00:28:35,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6d89f9c6cf3944dc8e12fe9349ffa414 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6d89f9c6cf3944dc8e12fe9349ffa414 2024-11-21T00:28:35,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f901ec666acf4cfc9eb6c18504c3c00e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f901ec666acf4cfc9eb6c18504c3c00e 2024-11-21T00:28:35,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/07197113ab724aecb58c91bf690aa2bf to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/07197113ab724aecb58c91bf690aa2bf 2024-11-21T00:28:35,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d8553174bef546fabd46f59ff410b652 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d8553174bef546fabd46f59ff410b652 2024-11-21T00:28:35,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f884156eaa2f4c03a054597c88a2f4ed to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f884156eaa2f4c03a054597c88a2f4ed 2024-11-21T00:28:35,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/341de1af37ac4ba184021a8f36e6f87d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/341de1af37ac4ba184021a8f36e6f87d 2024-11-21T00:28:35,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/05b8864b01a4431e8a0508f14c96a0dc to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/05b8864b01a4431e8a0508f14c96a0dc 2024-11-21T00:28:35,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/9ea86beb52e04293b0c7e78be30824de to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/9ea86beb52e04293b0c7e78be30824de 2024-11-21T00:28:35,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/de77c48747ae42748049445965866ed9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/de77c48747ae42748049445965866ed9 2024-11-21T00:28:35,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6c3c331c2f7d48eb911b6abab1a7f0bb to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6c3c331c2f7d48eb911b6abab1a7f0bb 2024-11-21T00:28:35,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/615abd6886434ad5a56bd857d9822a19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/615abd6886434ad5a56bd857d9822a19 2024-11-21T00:28:35,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/46e2bd47fb804bfc93abe80664d6412a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/46e2bd47fb804bfc93abe80664d6412a 2024-11-21T00:28:35,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eda84e91db574a3ca1b7644b2e47e0e5 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/eda84e91db574a3ca1b7644b2e47e0e5 2024-11-21T00:28:35,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6764e263b8d24f2ab605e095b020d3b9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6764e263b8d24f2ab605e095b020d3b9 2024-11-21T00:28:35,426 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/57aa7341429a4c6ba9de44babd0e5b99 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/57aa7341429a4c6ba9de44babd0e5b99 2024-11-21T00:28:35,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/44000524fb624e3aaacb56a7a45b2214 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/44000524fb624e3aaacb56a7a45b2214 2024-11-21T00:28:35,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f2ae4160d26449148d9408363471d438 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/f2ae4160d26449148d9408363471d438 2024-11-21T00:28:35,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c33df9504dad4a298ddf8f6b973010c0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c33df9504dad4a298ddf8f6b973010c0 2024-11-21T00:28:35,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7eecc53573484b87b8c8b361d6f97fef to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/7eecc53573484b87b8c8b361d6f97fef 2024-11-21T00:28:35,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6364f931f05645ff93ce26494b93b3ae to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/6364f931f05645ff93ce26494b93b3ae 2024-11-21T00:28:35,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/06990b80b1bf444e82aa668792c3ad47 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/06990b80b1bf444e82aa668792c3ad47 2024-11-21T00:28:35,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d1fb5f03a65b4772b1a910f41e8a3874 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d1fb5f03a65b4772b1a910f41e8a3874 2024-11-21T00:28:35,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d9832f76dc9942ad88dd07fa2813e0c7 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d9832f76dc9942ad88dd07fa2813e0c7 2024-11-21T00:28:35,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/dd9ad4da8ee4495e89bcb5efcc81dbc2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/dd9ad4da8ee4495e89bcb5efcc81dbc2 2024-11-21T00:28:35,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/423d3016bd4647689875fa2b40944358 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/423d3016bd4647689875fa2b40944358 2024-11-21T00:28:35,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/e4733b6967524891948d396749595c86 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/e4733b6967524891948d396749595c86 2024-11-21T00:28:35,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/034802cd4e6f42dd8ad7c5de72acb3f9 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/034802cd4e6f42dd8ad7c5de72acb3f9 2024-11-21T00:28:35,438 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/b0bb4813982d426288c81d9241288bcf to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/b0bb4813982d426288c81d9241288bcf 2024-11-21T00:28:35,440 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/0f9dbe90d317400e873fe0c8f4d65f7c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/0f9dbe90d317400e873fe0c8f4d65f7c 2024-11-21T00:28:35,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c596bb2fb3bf4d86a25613eb7151d8b3 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/c596bb2fb3bf4d86a25613eb7151d8b3 2024-11-21T00:28:35,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/a5bb2bfe3d43491f90e6558500c489bb to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/a5bb2bfe3d43491f90e6558500c489bb 2024-11-21T00:28:35,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/22347f7d325c48cd851d5527f0bf0716 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/22347f7d325c48cd851d5527f0bf0716 2024-11-21T00:28:35,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/bf4baad4d611417c997fb58d35722192 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/bf4baad4d611417c997fb58d35722192 2024-11-21T00:28:35,462 DEBUG 
[RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/recovered.edits/535.seqid, newMaxSeqId=535, maxSeqId=1 2024-11-21T00:28:35,462 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01. 2024-11-21T00:28:35,462 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for 1847e2b1a8a082929629d13ff179eb01: 2024-11-21T00:28:35,464 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed 1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:35,464 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=1847e2b1a8a082929629d13ff179eb01, regionState=CLOSED 2024-11-21T00:28:35,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-21T00:28:35,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure 1847e2b1a8a082929629d13ff179eb01, server=0e7930017ff8,37961,1732148819586 in 800 msec 2024-11-21T00:28:35,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-11-21T00:28:35,468 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1847e2b1a8a082929629d13ff179eb01, UNASSIGN in 808 msec 2024-11-21T00:28:35,470 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-21T00:28:35,470 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 812 msec 2024-11-21T00:28:35,472 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148915471"}]},"ts":"1732148915471"} 2024-11-21T00:28:35,474 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-21T00:28:35,488 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-21T00:28:35,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 853 msec 2024-11-21T00:28:35,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-21T00:28:35,741 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-21T00:28:35,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-21T00:28:35,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:35,743 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:35,743 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:35,747 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:35,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-21T00:28:35,749 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/recovered.edits] 2024-11-21T00:28:35,751 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/583b873251b347f8a3120919589b88c2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/583b873251b347f8a3120919589b88c2 2024-11-21T00:28:35,752 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/8489a78c3db049539df2f4cca687df62 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/A/8489a78c3db049539df2f4cca687df62 2024-11-21T00:28:35,754 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/498973cbbd0b496fbe65b0f39e477d37 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/498973cbbd0b496fbe65b0f39e477d37 2024-11-21T00:28:35,755 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/8563fa695c5840a19ee6e5ae58173708 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/B/8563fa695c5840a19ee6e5ae58173708 2024-11-21T00:28:35,757 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/cd6ca4417c084d0b91ac305a3abc98c8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/cd6ca4417c084d0b91ac305a3abc98c8 2024-11-21T00:28:35,758 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d807e1a9e0d5416e97c2a6ab07b71a93 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/C/d807e1a9e0d5416e97c2a6ab07b71a93 2024-11-21T00:28:35,760 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/recovered.edits/535.seqid to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01/recovered.edits/535.seqid 2024-11-21T00:28:35,760 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/1847e2b1a8a082929629d13ff179eb01 2024-11-21T00:28:35,760 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-21T00:28:35,762 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:35,766 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-21T00:28:35,769 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-21T00:28:35,770 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:35,770 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-21T00:28:35,771 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732148915770"}]},"ts":"9223372036854775807"} 2024-11-21T00:28:35,776 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-21T00:28:35,776 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1847e2b1a8a082929629d13ff179eb01, NAME => 'TestAcidGuarantees,,1732148886970.1847e2b1a8a082929629d13ff179eb01.', STARTKEY => '', ENDKEY => ''}] 2024-11-21T00:28:35,776 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-21T00:28:35,776 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732148915776"}]},"ts":"9223372036854775807"} 2024-11-21T00:28:35,778 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-21T00:28:35,806 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:35,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 64 msec 2024-11-21T00:28:35,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-21T00:28:35,848 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-11-21T00:28:35,858 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=238 (was 241), OpenFileDescriptor=453 (was 459), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=857 (was 788) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2387 (was 3008) 2024-11-21T00:28:35,868 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=238, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=857, ProcessCount=11, AvailableMemoryMB=2387 2024-11-21T00:28:35,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-21T00:28:35,870 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:35,871 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:35,872 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:35,872 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 96 2024-11-21T00:28:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-21T00:28:35,872 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:35,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742174_1350 (size=963) 2024-11-21T00:28:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-21T00:28:36,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-21T00:28:36,287 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f 2024-11-21T00:28:36,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742175_1351 (size=53) 2024-11-21T00:28:36,295 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:36,295 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 306fd645e20cdcec516bf24d0ab4894b, disabling compactions & flushes 2024-11-21T00:28:36,295 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:36,295 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:36,295 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. after waiting 0 ms 2024-11-21T00:28:36,295 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:36,295 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:36,295 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:36,296 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:36,296 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732148916296"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148916296"}]},"ts":"1732148916296"} 2024-11-21T00:28:36,297 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-21T00:28:36,298 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:36,298 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148916298"}]},"ts":"1732148916298"} 2024-11-21T00:28:36,300 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-21T00:28:36,354 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, ASSIGN}] 2024-11-21T00:28:36,355 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, ASSIGN 2024-11-21T00:28:36,355 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, ASSIGN; state=OFFLINE, location=0e7930017ff8,37961,1732148819586; forceNewPlan=false, retain=false 2024-11-21T00:28:36,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-21T00:28:36,506 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=306fd645e20cdcec516bf24d0ab4894b, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:36,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; OpenRegionProcedure 306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:28:36,658 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:36,661 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:36,662 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7285): Opening region: {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:36,662 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:36,662 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:36,662 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7327): checking encryption for 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:36,662 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7330): checking classloading for 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:36,663 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:36,665 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:28:36,665 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 306fd645e20cdcec516bf24d0ab4894b columnFamilyName A 2024-11-21T00:28:36,665 DEBUG [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:36,666 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(327): Store=306fd645e20cdcec516bf24d0ab4894b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:36,667 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:36,668 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:28:36,668 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 306fd645e20cdcec516bf24d0ab4894b columnFamilyName B 2024-11-21T00:28:36,668 DEBUG [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:36,669 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(327): Store=306fd645e20cdcec516bf24d0ab4894b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:36,669 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:36,670 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:28:36,670 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 306fd645e20cdcec516bf24d0ab4894b columnFamilyName C 2024-11-21T00:28:36,670 DEBUG [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:36,671 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(327): Store=306fd645e20cdcec516bf24d0ab4894b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:36,671 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:36,673 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:36,673 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:36,675 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T00:28:36,676 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1085): writing seq id for 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:36,680 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:36,680 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1102): Opened 306fd645e20cdcec516bf24d0ab4894b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61527673, jitterRate=-0.0831662267446518}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:28:36,681 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1001): Region open journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:36,682 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., pid=98, masterSystemTime=1732148916658 2024-11-21T00:28:36,686 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:36,687 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:36,688 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=306fd645e20cdcec516bf24d0ab4894b, regionState=OPEN, openSeqNum=2, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:36,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-21T00:28:36,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; OpenRegionProcedure 306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 in 188 msec 2024-11-21T00:28:36,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-11-21T00:28:36,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, ASSIGN in 350 msec 2024-11-21T00:28:36,706 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:36,707 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148916706"}]},"ts":"1732148916706"} 2024-11-21T00:28:36,708 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-21T00:28:36,723 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:36,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 853 msec 2024-11-21T00:28:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-21T00:28:36,976 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-11-21T00:28:36,977 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04f9fed4 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2953086d 2024-11-21T00:28:36,989 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33feebb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:36,990 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:36,991 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51124, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:36,992 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:36,993 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35754, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:36,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-21T00:28:36,995 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-21T00:28:37,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742176_1352 (size=999) 2024-11-21T00:28:37,421 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-21T00:28:37,421 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-21T00:28:37,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-21T00:28:37,426 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, REOPEN/MOVE}] 2024-11-21T00:28:37,427 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, REOPEN/MOVE 2024-11-21T00:28:37,427 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=306fd645e20cdcec516bf24d0ab4894b, regionState=CLOSING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:37,428 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T00:28:37,428 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; CloseRegionProcedure 306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:28:37,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:37,581 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(124): Close 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,581 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-21T00:28:37,581 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1681): Closing 306fd645e20cdcec516bf24d0ab4894b, disabling compactions & flushes 2024-11-21T00:28:37,581 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:37,581 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:37,581 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. after waiting 0 ms 2024-11-21T00:28:37,581 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:37,595 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-21T00:28:37,595 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:37,595 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1635): Region close journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:37,595 WARN [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionServer(3786): Not adding moved region record: 306fd645e20cdcec516bf24d0ab4894b to self. 2024-11-21T00:28:37,597 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(170): Closed 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,597 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=306fd645e20cdcec516bf24d0ab4894b, regionState=CLOSED 2024-11-21T00:28:37,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-21T00:28:37,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; CloseRegionProcedure 306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 in 170 msec 2024-11-21T00:28:37,599 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, REOPEN/MOVE; state=CLOSED, location=0e7930017ff8,37961,1732148819586; forceNewPlan=false, retain=true 2024-11-21T00:28:37,750 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=306fd645e20cdcec516bf24d0ab4894b, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:37,751 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure 306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:28:37,903 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:37,905 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:37,905 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:37,906 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,906 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:37,906 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,906 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,907 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,908 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:28:37,908 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 306fd645e20cdcec516bf24d0ab4894b columnFamilyName A 2024-11-21T00:28:37,909 DEBUG [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:37,909 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(327): Store=306fd645e20cdcec516bf24d0ab4894b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:37,910 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,910 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:28:37,910 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 306fd645e20cdcec516bf24d0ab4894b columnFamilyName B 2024-11-21T00:28:37,911 DEBUG [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:37,911 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(327): Store=306fd645e20cdcec516bf24d0ab4894b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:37,911 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,911 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:28:37,912 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 306fd645e20cdcec516bf24d0ab4894b columnFamilyName C 2024-11-21T00:28:37,912 DEBUG [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:37,912 INFO [StoreOpener-306fd645e20cdcec516bf24d0ab4894b-1 {}] regionserver.HStore(327): Store=306fd645e20cdcec516bf24d0ab4894b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:37,912 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:37,913 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,913 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,914 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T00:28:37,916 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:37,916 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 306fd645e20cdcec516bf24d0ab4894b; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63351880, jitterRate=-0.05598342418670654}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:28:37,917 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:37,918 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., pid=103, masterSystemTime=1732148917902 2024-11-21T00:28:37,919 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:37,919 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
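[Editor's note] The open sequence above instantiates column families A, B and C each backed by a CompactingMemStore with compactor=ADAPTIVE, and the flush later in this log goes through HMobStore/DefaultMobStoreFlusher, i.e. the families are MOB-enabled. The sketch below shows, under stated assumptions, how a table descriptor with those properties could be built with the 2.x client API; the MOB threshold value and the use of createTable (rather than an alter of an existing table) are assumptions, not facts from this log.

// Hedged sketch: a descriptor matching "memstore type=CompactingMemStore,
// compactor=ADAPTIVE" and MOB-enabled families A/B/C seen in the open above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTableExample {
    private static ColumnFamilyDescriptor family(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            // in-memory compaction policy reported as ADAPTIVE in the log
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            // HMobStore / DefaultMobStoreFlusher in the log imply MOB is on
            .setMobEnabled(true)
            .setMobThreshold(100L)          // assumed threshold, not from the log
            .build();
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder builder =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
            for (String cf : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(family(cf));
            }
            admin.createTable(builder.build());
        }
    }
}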
2024-11-21T00:28:37,919 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=306fd645e20cdcec516bf24d0ab4894b, regionState=OPEN, openSeqNum=5, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:37,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-11-21T00:28:37,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure 306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 in 169 msec 2024-11-21T00:28:37,922 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-21T00:28:37,922 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, REOPEN/MOVE in 495 msec 2024-11-21T00:28:37,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-21T00:28:37,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 498 msec 2024-11-21T00:28:37,924 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 927 msec 2024-11-21T00:28:37,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-21T00:28:37,927 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1157d18a to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2076b3ad 2024-11-21T00:28:37,980 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c40db2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:37,981 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x353bcb3d to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@20c5edec 2024-11-21T00:28:37,995 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a86cb71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:37,997 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77b5b03d to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@789089aa 2024-11-21T00:28:38,006 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3401188a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:38,007 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x15bd9063 to 
127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@699c96a7 2024-11-21T00:28:38,014 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55650656, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:38,015 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c0ec341 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@712a5bc9 2024-11-21T00:28:38,022 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c80a40c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:38,023 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b660061 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62cf69c5 2024-11-21T00:28:38,030 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5910b8c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:38,031 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45ad0ff5 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c35c7c4 2024-11-21T00:28:38,045 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f9a05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:38,047 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x28dc77ab to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a259e93 2024-11-21T00:28:38,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b6d860, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:38,064 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70304ef6 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6179765 2024-11-21T00:28:38,072 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16722a1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:38,073 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x0f8ea360 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3518b14b 2024-11-21T00:28:38,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@323d4725, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:38,105 DEBUG [hconnection-0x5c00c24-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:38,106 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:38,107 DEBUG [hconnection-0x3393d3c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:38,107 DEBUG [hconnection-0x30526586-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:38,108 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45724, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:38,108 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45730, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:38,121 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:28:38,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:38,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:38,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:38,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:38,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:38,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:38,129 DEBUG [hconnection-0x4439a2ca-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:38,130 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45732, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:38,132 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 
{}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-21T00:28:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-21T00:28:38,136 DEBUG [hconnection-0x51dd2c44-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:38,136 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:38,137 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:38,137 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:38,137 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45746, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:38,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148978141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148978141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148978142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,165 DEBUG [hconnection-0x75885f97-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:38,166 DEBUG [hconnection-0x45b283ed-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:38,166 DEBUG [hconnection-0x7ecf4618-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:38,168 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:38,168 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45750, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:38,169 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45770, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:38,171 DEBUG [hconnection-0x1ca273be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:38,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148978171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,172 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45782, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:38,176 DEBUG [hconnection-0x9f3985b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:38,177 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:38,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148978178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ab49664161e0480d99c68ffd9085912c_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148918119/Put/seqid=0 2024-11-21T00:28:38,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742177_1353 (size=12154) 2024-11-21T00:28:38,228 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:38,233 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ab49664161e0480d99c68ffd9085912c_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ab49664161e0480d99c68ffd9085912c_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:38,234 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/9a7c5d7865a14b5aba30538f98548504, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:38,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/9a7c5d7865a14b5aba30538f98548504 is 175, key is test_row_0/A:col10/1732148918119/Put/seqid=0 2024-11-21T00:28:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-21T00:28:38,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148978243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148978243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148978243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742178_1354 (size=30955) 2024-11-21T00:28:38,270 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/9a7c5d7865a14b5aba30538f98548504 2024-11-21T00:28:38,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148978273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148978281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,289 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:38,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
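[Editor's note] The repeated RegionTooBusyException warnings above are the region server pushing back on writers while the region's memstore is over its blocking limit (here 512.0 K) and the flush started at 00:28:38,121 has not yet freed space. The standard HBase client normally retries these calls internally, so whether application code ever sees the raw exception depends on its retry settings; the sketch below only makes the backoff pattern explicit for a single Put. The row, qualifier and value are assumptions loosely modelled on the keys in this log.

// Hedged sketch: manual backoff on RegionTooBusyException for one Put.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;                       // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore limit; wait for the flush
                    // in progress to drain it, then try again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}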
2024-11-21T00:28:38,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:38,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:38,290 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:38,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:38,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
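[Editor's note] The FlushTableProcedure (pid=104) and its per-region FlushRegionProcedure (pid=105) above come from the client request logged as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees"; the remote callable fails with "Unable to complete flush" and is retried because the MemStoreFlusher flush started at 00:28:38,121 is still running, so the region reports "NOT flushing ... as already flushing". A minimal sketch of that client call is below; the connection setup is assumed, only the table name is from the log.

// Hedged sketch: requesting a table flush from the client, which the master
// turns into the FlushTableProcedure/FlushRegionProcedure seen above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Blocks until the master reports the flush as done, which is what
            // the repeated "Checking to see if procedure is done pid=104"
            // lines in this log correspond to.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}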
2024-11-21T00:28:38,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/c74f5325ba35410dbaae4853b02b624f is 50, key is test_row_0/B:col10/1732148918119/Put/seqid=0 2024-11-21T00:28:38,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742179_1355 (size=12001) 2024-11-21T00:28:38,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/c74f5325ba35410dbaae4853b02b624f 2024-11-21T00:28:38,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/56ed5e654f6e4afc879d89f3395a657a is 50, key is test_row_0/C:col10/1732148918119/Put/seqid=0 2024-11-21T00:28:38,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-21T00:28:38,442 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:38,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:38,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:38,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:38,443 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:38,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:38,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148978445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148978445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148978446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742180_1356 (size=12001) 2024-11-21T00:28:38,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/56ed5e654f6e4afc879d89f3395a657a 2024-11-21T00:28:38,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/9a7c5d7865a14b5aba30538f98548504 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9a7c5d7865a14b5aba30538f98548504 2024-11-21T00:28:38,456 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9a7c5d7865a14b5aba30538f98548504, entries=150, sequenceid=15, filesize=30.2 K 2024-11-21T00:28:38,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/c74f5325ba35410dbaae4853b02b624f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c74f5325ba35410dbaae4853b02b624f 2024-11-21T00:28:38,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c74f5325ba35410dbaae4853b02b624f, entries=150, sequenceid=15, filesize=11.7 K 2024-11-21T00:28:38,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/56ed5e654f6e4afc879d89f3395a657a as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/56ed5e654f6e4afc879d89f3395a657a 2024-11-21T00:28:38,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/56ed5e654f6e4afc879d89f3395a657a, entries=150, sequenceid=15, filesize=11.7 K 2024-11-21T00:28:38,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 306fd645e20cdcec516bf24d0ab4894b in 349ms, sequenceid=15, compaction requested=false 2024-11-21T00:28:38,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:38,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:38,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-21T00:28:38,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:38,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:38,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:38,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:38,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:38,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:38,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121f75d29fc534f4ed090087b7408841f64_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148918474/Put/seqid=0 2024-11-21T00:28:38,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148978496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148978483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742181_1357 (size=14594) 2024-11-21T00:28:38,540 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:38,543 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121f75d29fc534f4ed090087b7408841f64_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f75d29fc534f4ed090087b7408841f64_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:38,544 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/7a401054d7fb4f688d99537dc4696f96, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:38,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/7a401054d7fb4f688d99537dc4696f96 is 175, key is test_row_0/A:col10/1732148918474/Put/seqid=0 2024-11-21T00:28:38,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742182_1358 (size=39549) 2024-11-21T00:28:38,595 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:38,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:38,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:38,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:38,596 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:38,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:38,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:38,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148978597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-21T00:28:38,748 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:38,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:38,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:38,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:38,749 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:38,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:38,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:38,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148978748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148978748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148978748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148978800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:38,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148978801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,901 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:38,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:38,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:38,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:38,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:38,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:38,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:38,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:38,994 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=43, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/7a401054d7fb4f688d99537dc4696f96 2024-11-21T00:28:39,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/a45c425e22044bb9a38b735f4c9bc03c is 50, key is test_row_0/B:col10/1732148918474/Put/seqid=0 2024-11-21T00:28:39,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742183_1359 (size=12001) 2024-11-21T00:28:39,056 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:39,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:39,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:39,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:39,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:39,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:39,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148979107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,211 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:39,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:39,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:39,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:39,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-21T00:28:39,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:39,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148979254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:39,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:39,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148979256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148979255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:39,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148979307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,363 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:39,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:39,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:39,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:39,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:39,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/a45c425e22044bb9a38b735f4c9bc03c 2024-11-21T00:28:39,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/e42062edb6584afb8c587ca09544bc6c is 50, key is test_row_0/C:col10/1732148918474/Put/seqid=0 2024-11-21T00:28:39,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742184_1360 (size=12001) 2024-11-21T00:28:39,516 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:39,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:39,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:39,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:39,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:39,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:39,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148979612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,669 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:39,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:39,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:39,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:39,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,822 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:39,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:39,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:39,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:39,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:39,897 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/e42062edb6584afb8c587ca09544bc6c 2024-11-21T00:28:39,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/7a401054d7fb4f688d99537dc4696f96 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7a401054d7fb4f688d99537dc4696f96 2024-11-21T00:28:39,922 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7a401054d7fb4f688d99537dc4696f96, entries=200, sequenceid=43, filesize=38.6 K 2024-11-21T00:28:39,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/a45c425e22044bb9a38b735f4c9bc03c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/a45c425e22044bb9a38b735f4c9bc03c 2024-11-21T00:28:39,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,925 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/a45c425e22044bb9a38b735f4c9bc03c, entries=150, sequenceid=43, filesize=11.7 K 2024-11-21T00:28:39,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/e42062edb6584afb8c587ca09544bc6c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/e42062edb6584afb8c587ca09544bc6c 2024-11-21T00:28:39,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/e42062edb6584afb8c587ca09544bc6c, entries=150, sequenceid=43, filesize=11.7 K 2024-11-21T00:28:39,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 306fd645e20cdcec516bf24d0ab4894b in 1468ms, sequenceid=43, compaction requested=false 2024-11-21T00:28:39,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:39,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,979 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-21T00:28:39,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:39,980 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-21T00:28:39,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:39,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:39,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:39,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:39,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121329c2594536e4fbaa14806d541270f7d_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_1/A:col10/1732148918488/Put/seqid=0 2024-11-21T00:28:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742185_1361 (size=9714) 2024-11-21T00:28:40,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 
{event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,034 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,039 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,042 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121329c2594536e4fbaa14806d541270f7d_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121329c2594536e4fbaa14806d541270f7d_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/f4413317717f4524862dd42b0dc562e5, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:40,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/f4413317717f4524862dd42b0dc562e5 is 175, key is test_row_1/A:col10/1732148918488/Put/seqid=0 2024-11-21T00:28:40,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,047 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,053 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,059 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,066 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,069 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,072 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,077 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,080 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742186_1362 (size=22361) 2024-11-21T00:28:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,083 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/f4413317717f4524862dd42b0dc562e5 2024-11-21T00:28:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/c33655d1173942c8be35781269bb3f6c is 50, key is test_row_1/B:col10/1732148918488/Put/seqid=0 2024-11-21T00:28:40,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742187_1363 (size=9657) 2024-11-21T00:28:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,137 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/c33655d1173942c8be35781269bb3f6c 2024-11-21T00:28:40,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
2024-11-21T00:28:40,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/46747fcd2de545edb4b3d7380d967073 is 50, key is test_row_1/C:col10/1732148918488/Put/seqid=0
2024-11-21T00:28:40,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742188_1364 (size=9657)
2024-11-21T00:28:40,180 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/46747fcd2de545edb4b3d7380d967073
2024-11-21T00:28:40,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/f4413317717f4524862dd42b0dc562e5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f4413317717f4524862dd42b0dc562e5
2024-11-21T00:28:40,194 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f4413317717f4524862dd42b0dc562e5, entries=100, sequenceid=51, filesize=21.8 K
2024-11-21T00:28:40,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/c33655d1173942c8be35781269bb3f6c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c33655d1173942c8be35781269bb3f6c
2024-11-21T00:28:40,199 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c33655d1173942c8be35781269bb3f6c, entries=100, sequenceid=51, filesize=9.4 K
2024-11-21T00:28:40,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/46747fcd2de545edb4b3d7380d967073 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/46747fcd2de545edb4b3d7380d967073
2024-11-21T00:28:40,205 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/46747fcd2de545edb4b3d7380d967073, entries=100, sequenceid=51, filesize=9.4 K
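Note: the "Committing ... as ..." lines above record each flushed .tmp file being moved into its column-family directory (A, B, C) before the store file is registered with its HStore. As a simplified illustration of that commit step only (not HBase's actual HRegionFileSystem code), a plain HDFS rename of a hypothetical temporary file looks like this; only the namenode address comes from this log, all paths are made up:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:38105"); // namenode address taken from this log
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical paths standing in for <region>/.tmp/<family>/<file> and <region>/<family>/<file>.
    Path tmpFile = new Path("/demo/region/.tmp/A/flushed-hfile");
    Path storeFile = new Path("/demo/region/A/flushed-hfile");
    boolean committed = fs.rename(tmpFile, storeFile); // the commit is essentially this rename
    System.out.println("committed: " + committed);
  }
}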
2024-11-21T00:28:40,207 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 306fd645e20cdcec516bf24d0ab4894b in 227ms, sequenceid=51, compaction requested=true
2024-11-21T00:28:40,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b:
2024-11-21T00:28:40,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.
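Note: the flush that just finished (pid=105) runs as a FlushRegionProcedure under the FlushTableProcedure the test client requested; the client.HBaseAdmin$TableFuture entry below reports "Operation: FLUSH ... procId: 104 completed". A minimal, hedged sketch of issuing such a flush through the standard Admin API; only the table name comes from this log, the connection setup is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the master to flush every region of the table; in this log the request
      // shows up as a FlushTableProcedure (pid=104/106) with FlushRegionProcedure children.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}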
2024-11-21T00:28:40,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105
2024-11-21T00:28:40,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=105
2024-11-21T00:28:40,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104
2024-11-21T00:28:40,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0740 sec
2024-11-21T00:28:40,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.0820 sec
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,243 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-21T00:28:40,243 INFO [Thread-1598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-21T00:28:40,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:40,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-21T00:28:40,246 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:40,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-21T00:28:40,247 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:40,247 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:40,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-21T00:28:40,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,399 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-21T00:28:40,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:40,400 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-21T00:28:40,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:40,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:40,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:40,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:40,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:40,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:40,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:40,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:40,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,418 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,429 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112126dcb4271b6744aa9545e6e1da06acd1_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148920383/Put/seqid=0 2024-11-21T00:28:40,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742189_1365 (size=14594) 2024-11-21T00:28:40,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-21T00:28:40,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:40,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148980584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148980585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148980587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148980588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148980617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148980694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148980694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148980700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148980701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-21T00:28:40,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,864 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112126dcb4271b6744aa9545e6e1da06acd1_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112126dcb4271b6744aa9545e6e1da06acd1_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:40,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/2ad49fea1e564710a91e9948e8c5ccce, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:40,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/2ad49fea1e564710a91e9948e8c5ccce is 175, key is test_row_0/A:col10/1732148920383/Put/seqid=0 2024-11-21T00:28:40,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742190_1366 (size=39549) 2024-11-21T00:28:40,880 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=58, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/2ad49fea1e564710a91e9948e8c5ccce 
2024-11-21T00:28:40,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/991132d3fc70457b95583a49f96144de is 50, key is test_row_0/B:col10/1732148920383/Put/seqid=0 2024-11-21T00:28:40,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148980897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148980898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148980908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:40,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148980908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:40,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742191_1367 (size=12001) 2024-11-21T00:28:40,925 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/991132d3fc70457b95583a49f96144de 2024-11-21T00:28:40,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/b8ef0721886a41b886d4cbbf57bed49a is 50, key is test_row_0/C:col10/1732148920383/Put/seqid=0 2024-11-21T00:28:41,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742192_1368 (size=12001) 2024-11-21T00:28:41,014 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/b8ef0721886a41b886d4cbbf57bed49a 2024-11-21T00:28:41,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/2ad49fea1e564710a91e9948e8c5ccce as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/2ad49fea1e564710a91e9948e8c5ccce 2024-11-21T00:28:41,029 INFO 
[RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/2ad49fea1e564710a91e9948e8c5ccce, entries=200, sequenceid=58, filesize=38.6 K 2024-11-21T00:28:41,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/991132d3fc70457b95583a49f96144de as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/991132d3fc70457b95583a49f96144de 2024-11-21T00:28:41,036 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/991132d3fc70457b95583a49f96144de, entries=150, sequenceid=58, filesize=11.7 K 2024-11-21T00:28:41,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/b8ef0721886a41b886d4cbbf57bed49a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/b8ef0721886a41b886d4cbbf57bed49a 2024-11-21T00:28:41,044 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/b8ef0721886a41b886d4cbbf57bed49a, entries=150, sequenceid=58, filesize=11.7 K 2024-11-21T00:28:41,049 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-21T00:28:41,052 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 306fd645e20cdcec516bf24d0ab4894b in 651ms, sequenceid=58, compaction requested=true 2024-11-21T00:28:41,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:41,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
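While the flush above catches up, the writer threads keep receiving RegionTooBusyException on their Puts against test_row_0 in families A, B and C. On the client side this is a retriable IOException; a hedged sketch of an explicit retry loop around Table.put is shown below. The table and column names mirror the cells in the log, but the loop itself is illustrative and is not the test harness's code; depending on client retry settings the exception may also arrive wrapped rather than directly.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // The HBase client already retries retriable exceptions internally; this loop
      // just makes the back-off visible for the purposes of the example.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (IOException e) {
          // RegionTooBusyException can surface directly or as the cause of a
          // retries-exhausted exception, so check both before backing off.
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt >= 5) throw e;   // give up after a few attempts
          Thread.sleep(100L * attempt);          // simple linear back-off
        }
      }
    }
  }
}
```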
2024-11-21T00:28:41,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-21T00:28:41,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-21T00:28:41,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-21T00:28:41,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 806 msec 2024-11-21T00:28:41,056 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 811 msec 2024-11-21T00:28:41,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:41,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-21T00:28:41,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:41,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:41,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:41,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:41,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:41,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:41,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148981221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148981227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148981227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148981232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121593d8759a1594d9687a8961907172911_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148920587/Put/seqid=0 2024-11-21T00:28:41,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742193_1369 (size=12154) 2024-11-21T00:28:41,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-21T00:28:41,350 INFO [Thread-1598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-21T00:28:41,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148981337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148981338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148981338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,357 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:41,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-21T00:28:41,359 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:41,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-21T00:28:41,364 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:41,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:41,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-21T00:28:41,526 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-21T00:28:41,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
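The pid=106 and pid=108 entries trace the server side of an admin-initiated flush: the master stores a FlushTableProcedure, fans out a FlushRegionProcedure per region, and the HBaseAdmin$TableFuture line marks the client observing procId 106 complete. On the client, the whole sequence is typically driven by a single Admin.flush call; a minimal sketch follows, with connection setup assumed and the table name taken from the log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this log the master
      // runs it as a FlushTableProcedure with one FlushRegionProcedure subprocedure.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```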
2024-11-21T00:28:41,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:41,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:41,529 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:41,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:41,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:41,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148981555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148981555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148981555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-21T00:28:41,680 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-21T00:28:41,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:41,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:41,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:41,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:41,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:41,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:41,707 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,711 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121593d8759a1594d9687a8961907172911_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121593d8759a1594d9687a8961907172911_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:41,712 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/dac64c5490f344f1bd0ae7f1f9fc1d28, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:41,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/dac64c5490f344f1bd0ae7f1f9fc1d28 is 175, key is test_row_0/A:col10/1732148920587/Put/seqid=0 2024-11-21T00:28:41,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:41,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148981737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742194_1370 (size=30955) 2024-11-21T00:28:41,769 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=89, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/dac64c5490f344f1bd0ae7f1f9fc1d28 2024-11-21T00:28:41,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/bbb97bee23f54f28b7fd46e811cdbc79 is 50, key is test_row_0/B:col10/1732148920587/Put/seqid=0 2024-11-21T00:28:41,835 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:41,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-21T00:28:41,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:41,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:41,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:41,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:41,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:41,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742195_1371 (size=12001) 2024-11-21T00:28:41,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/bbb97bee23f54f28b7fd46e811cdbc79 2024-11-21T00:28:41,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:28:41,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/245d3666bad64805a4befd88bf30eb32 is 50, key is test_row_0/C:col10/1732148920587/Put/seqid=0
2024-11-21T00:28:41,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:28:41,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148981866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:28:41,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:28:41,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148981866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:28:41,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:28:41,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148981867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:28:41,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742196_1372 (size=12001)
2024-11-21T00:28:41,911 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/245d3666bad64805a4befd88bf30eb32
2024-11-21T00:28:41,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/dac64c5490f344f1bd0ae7f1f9fc1d28 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/dac64c5490f344f1bd0ae7f1f9fc1d28
2024-11-21T00:28:41,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/dac64c5490f344f1bd0ae7f1f9fc1d28, entries=150, sequenceid=89, filesize=30.2 K
2024-11-21T00:28:41,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/bbb97bee23f54f28b7fd46e811cdbc79 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bbb97bee23f54f28b7fd46e811cdbc79
2024-11-21T00:28:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T00:28:41,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bbb97bee23f54f28b7fd46e811cdbc79, entries=150, sequenceid=89, filesize=11.7 K 2024-11-21T00:28:41,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/245d3666bad64805a4befd88bf30eb32 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/245d3666bad64805a4befd88bf30eb32 2024-11-21T00:28:41,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/245d3666bad64805a4befd88bf30eb32, entries=150, sequenceid=89, filesize=11.7 K 2024-11-21T00:28:41,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=20.13 KB/20610 for 306fd645e20cdcec516bf24d0ab4894b in 720ms, sequenceid=89, compaction requested=true 2024-11-21T00:28:41,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:41,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:41,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:41,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:41,930 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-21T00:28:41,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:41,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:41,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:28:41,930 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-21T00:28:41,933 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 
files of size 163369 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-21T00:28:41,933 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/A is initiating minor compaction (all files) 2024-11-21T00:28:41,933 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/A in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:41,933 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9a7c5d7865a14b5aba30538f98548504, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7a401054d7fb4f688d99537dc4696f96, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f4413317717f4524862dd42b0dc562e5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/2ad49fea1e564710a91e9948e8c5ccce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/dac64c5490f344f1bd0ae7f1f9fc1d28] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=159.5 K 2024-11-21T00:28:41,933 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:41,933 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9a7c5d7865a14b5aba30538f98548504, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7a401054d7fb4f688d99537dc4696f96, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f4413317717f4524862dd42b0dc562e5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/2ad49fea1e564710a91e9948e8c5ccce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/dac64c5490f344f1bd0ae7f1f9fc1d28] 2024-11-21T00:28:41,934 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 57661 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-21T00:28:41,934 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/B is initiating minor compaction (all files) 2024-11-21T00:28:41,934 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/B in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:41,934 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c74f5325ba35410dbaae4853b02b624f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/a45c425e22044bb9a38b735f4c9bc03c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c33655d1173942c8be35781269bb3f6c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/991132d3fc70457b95583a49f96144de, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bbb97bee23f54f28b7fd46e811cdbc79] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=56.3 K 2024-11-21T00:28:41,934 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a7c5d7865a14b5aba30538f98548504, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732148918111 2024-11-21T00:28:41,935 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a401054d7fb4f688d99537dc4696f96, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732148918139 2024-11-21T00:28:41,935 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c74f5325ba35410dbaae4853b02b624f, keycount=150, bloomtype=ROW, 
size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732148918111 2024-11-21T00:28:41,935 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f4413317717f4524862dd42b0dc562e5, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732148918488 2024-11-21T00:28:41,935 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting a45c425e22044bb9a38b735f4c9bc03c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732148918139 2024-11-21T00:28:41,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,936 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ad49fea1e564710a91e9948e8c5ccce, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1732148920377 2024-11-21T00:28:41,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,936 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c33655d1173942c8be35781269bb3f6c, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732148918488 2024-11-21T00:28:41,936 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting dac64c5490f344f1bd0ae7f1f9fc1d28, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732148920536 2024-11-21T00:28:41,937 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 991132d3fc70457b95583a49f96144de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1732148920377 2024-11-21T00:28:41,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,937 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbb97bee23f54f28b7fd46e811cdbc79, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732148920536 2024-11-21T00:28:41,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,956 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:41,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,958 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,961 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-21T00:28:41,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,970 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#B#compaction#319 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:41,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,970 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/b3659b66cc404d9880fb09df77abb5cf is 50, key is test_row_0/B:col10/1732148920587/Put/seqid=0 2024-11-21T00:28:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,978 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,983 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,983 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121eee06d1ae48047fe9c11405bd7498847_306fd645e20cdcec516bf24d0ab4894b store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,987 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121eee06d1ae48047fe9c11405bd7498847_306fd645e20cdcec516bf24d0ab4894b, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:41,987 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121eee06d1ae48047fe9c11405bd7498847_306fd645e20cdcec516bf24d0ab4894b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,991 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 
2024-11-21T00:28:41,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-21T00:28:41,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,992 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-21T00:28:41,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:41,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:41,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:41,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:41,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121f4702171c4a6420797935b48816cdb66_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148921220/Put/seqid=0 2024-11-21T00:28:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,033 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,036 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742197_1373 (size=12173) 2024-11-21T00:28:42,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742198_1374 (size=4469) 2024-11-21T00:28:42,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742199_1375 (size=9714) 2024-11-21T00:28:42,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[same DEBUG message emitted repeatedly by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 37961 between 2024-11-21T00:28:42,160 and 2024-11-21T00:28:42,289]
2024-11-21T00:28:42,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T00:28:42,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,440 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/b3659b66cc404d9880fb09df77abb5cf as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b3659b66cc404d9880fb09df77abb5cf 2024-11-21T00:28:42,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,448 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/B of 306fd645e20cdcec516bf24d0ab4894b into b3659b66cc404d9880fb09df77abb5cf(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:42,448 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:42,448 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/B, priority=11, startTime=1732148921930; duration=0sec 2024-11-21T00:28:42,448 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:42,448 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:B 2024-11-21T00:28:42,448 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-21T00:28:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,450 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 57661 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-21T00:28:42,450 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/C is initiating minor compaction (all files) 2024-11-21T00:28:42,450 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/C in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:42,450 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/56ed5e654f6e4afc879d89f3395a657a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/e42062edb6584afb8c587ca09544bc6c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/46747fcd2de545edb4b3d7380d967073, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/b8ef0721886a41b886d4cbbf57bed49a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/245d3666bad64805a4befd88bf30eb32] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=56.3 K 2024-11-21T00:28:42,450 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56ed5e654f6e4afc879d89f3395a657a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732148918111 2024-11-21T00:28:42,451 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting e42062edb6584afb8c587ca09544bc6c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732148918139 2024-11-21T00:28:42,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,451 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46747fcd2de545edb4b3d7380d967073, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732148918488 2024-11-21T00:28:42,451 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8ef0721886a41b886d4cbbf57bed49a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1732148920377 2024-11-21T00:28:42,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,452 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 245d3666bad64805a4befd88bf30eb32, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732148920536 2024-11-21T00:28:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-21T00:28:42,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,469 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#A#compaction#318 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,470 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/47db2b8b6c0c48bebeda1204c8ecb925 is 175, key is test_row_0/A:col10/1732148920587/Put/seqid=0 2024-11-21T00:28:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,473 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,474 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#C#compaction#321 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,475 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/f1a25adfa4744068bc7099ca5eda5bd1 is 50, key is test_row_0/C:col10/1732148920587/Put/seqid=0 2024-11-21T00:28:42,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,490 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121f4702171c4a6420797935b48816cdb66_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f4702171c4a6420797935b48816cdb66_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:42,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/7e31516382c54d47826573dcd28b8e34, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:42,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/7e31516382c54d47826573dcd28b8e34 is 175, key is test_row_0/A:col10/1732148921220/Put/seqid=0 2024-11-21T00:28:42,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:42,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:42,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742201_1377 (size=12173) 2024-11-21T00:28:42,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,515 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/f1a25adfa4744068bc7099ca5eda5bd1 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f1a25adfa4744068bc7099ca5eda5bd1 2024-11-21T00:28:42,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,522 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/C of 306fd645e20cdcec516bf24d0ab4894b into f1a25adfa4744068bc7099ca5eda5bd1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:42,523 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:42,523 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/C, priority=11, startTime=1732148921930; duration=0sec 2024-11-21T00:28:42,523 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:42,523 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:C 2024-11-21T00:28:42,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39227 is added to blk_1073742200_1376 (size=31127) 2024-11-21T00:28:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,535 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/47db2b8b6c0c48bebeda1204c8ecb925 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/47db2b8b6c0c48bebeda1204c8ecb925 2024-11-21T00:28:42,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,540 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/A of 306fd645e20cdcec516bf24d0ab4894b into 47db2b8b6c0c48bebeda1204c8ecb925(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:42,540 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:42,540 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/A, priority=11, startTime=1732148921930; duration=0sec 2024-11-21T00:28:42,541 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:42,541 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:A 2024-11-21T00:28:42,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742202_1378 (size=22361) 2024-11-21T00:28:42,568 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/7e31516382c54d47826573dcd28b8e34 2024-11-21T00:28:42,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:42,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:42,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/e2f486e763ee4d3bb246a9abe749c478 is 50, key is test_row_0/B:col10/1732148921220/Put/seqid=0 2024-11-21T00:28:42,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742203_1379 (size=9657) 2024-11-21T00:28:42,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148982637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:42,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148982639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:42,647 DEBUG [Thread-1596 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., hostname=0e7930017ff8,37961,1732148819586, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:42,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148982647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:42,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148982647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:42,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148982747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:42,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148982753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:42,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148982757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:42,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148982757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:42,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148982952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:42,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148982958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:42,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:42,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148982959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:43,039 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/e2f486e763ee4d3bb246a9abe749c478 2024-11-21T00:28:43,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/cac6213a975142419cfe3d93dbb61398 is 50, key is test_row_0/C:col10/1732148921220/Put/seqid=0 2024-11-21T00:28:43,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742204_1380 (size=9657) 2024-11-21T00:28:43,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:43,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148983256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:43,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:43,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148983262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:43,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:43,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148983263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:43,449 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/cac6213a975142419cfe3d93dbb61398 2024-11-21T00:28:43,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/7e31516382c54d47826573dcd28b8e34 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7e31516382c54d47826573dcd28b8e34 2024-11-21T00:28:43,456 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7e31516382c54d47826573dcd28b8e34, entries=100, sequenceid=95, filesize=21.8 K 2024-11-21T00:28:43,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/e2f486e763ee4d3bb246a9abe749c478 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e2f486e763ee4d3bb246a9abe749c478 2024-11-21T00:28:43,463 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e2f486e763ee4d3bb246a9abe749c478, entries=100, sequenceid=95, filesize=9.4 K 2024-11-21T00:28:43,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/cac6213a975142419cfe3d93dbb61398 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cac6213a975142419cfe3d93dbb61398 2024-11-21T00:28:43,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-21T00:28:43,469 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cac6213a975142419cfe3d93dbb61398, entries=100, sequenceid=95, filesize=9.4 K 2024-11-21T00:28:43,469 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 306fd645e20cdcec516bf24d0ab4894b in 1477ms, sequenceid=95, compaction requested=false 2024-11-21T00:28:43,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:43,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:43,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-21T00:28:43,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-21T00:28:43,471 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-21T00:28:43,471 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1060 sec 2024-11-21T00:28:43,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.1140 sec 2024-11-21T00:28:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:43,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-21T00:28:43,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:43,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:43,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:43,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:43,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:43,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:43,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:43,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148983768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:43,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112143668bbc5f6f4401b66a89f0af158fa0_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148922643/Put/seqid=0 2024-11-21T00:28:43,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:43,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148983768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:43,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:43,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148983769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:43,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742205_1381 (size=14644) 2024-11-21T00:28:43,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:43,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148983872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:43,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:43,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148983876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:43,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:43,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148983876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:44,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:44,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148984076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:44,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:44,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148984081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:44,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:44,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148984082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:44,178 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,184 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112143668bbc5f6f4401b66a89f0af158fa0_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112143668bbc5f6f4401b66a89f0af158fa0_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:44,185 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/b0db89da230945c784fc78f173970cb9, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:44,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/b0db89da230945c784fc78f173970cb9 is 175, key is test_row_0/A:col10/1732148922643/Put/seqid=0 2024-11-21T00:28:44,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742206_1382 (size=39599) 2024-11-21T00:28:44,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:44,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148984381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:44,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:44,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148984386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:44,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:44,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148984386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:44,603 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/b0db89da230945c784fc78f173970cb9 2024-11-21T00:28:44,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ece27a92523e4a18b4028c073d9035ce is 50, key is test_row_0/B:col10/1732148922643/Put/seqid=0 2024-11-21T00:28:44,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742207_1383 (size=12051) 2024-11-21T00:28:44,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ece27a92523e4a18b4028c073d9035ce 2024-11-21T00:28:44,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/be1717b181fb4089802023b66f052810 is 50, key is test_row_0/C:col10/1732148922643/Put/seqid=0 2024-11-21T00:28:44,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742208_1384 (size=12051) 2024-11-21T00:28:44,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/be1717b181fb4089802023b66f052810 2024-11-21T00:28:44,661 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/b0db89da230945c784fc78f173970cb9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/b0db89da230945c784fc78f173970cb9 2024-11-21T00:28:44,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/b0db89da230945c784fc78f173970cb9, entries=200, sequenceid=129, filesize=38.7 K 2024-11-21T00:28:44,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ece27a92523e4a18b4028c073d9035ce as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ece27a92523e4a18b4028c073d9035ce 2024-11-21T00:28:44,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ece27a92523e4a18b4028c073d9035ce, entries=150, sequenceid=129, filesize=11.8 K 2024-11-21T00:28:44,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/be1717b181fb4089802023b66f052810 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/be1717b181fb4089802023b66f052810 
2024-11-21T00:28:44,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/be1717b181fb4089802023b66f052810, entries=150, sequenceid=129, filesize=11.8 K 2024-11-21T00:28:44,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for 306fd645e20cdcec516bf24d0ab4894b in 935ms, sequenceid=129, compaction requested=true 2024-11-21T00:28:44,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:44,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:44,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:44,703 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:44,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:44,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:44,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add 
compact mark for store 306fd645e20cdcec516bf24d0ab4894b:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:44,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:44,703 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:44,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,704 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93087 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:44,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,704 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/A is initiating minor compaction (all files) 2024-11-21T00:28:44,705 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/A in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:44,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,705 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/47db2b8b6c0c48bebeda1204c8ecb925, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7e31516382c54d47826573dcd28b8e34, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/b0db89da230945c784fc78f173970cb9] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=90.9 K 2024-11-21T00:28:44,705 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:44,705 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/47db2b8b6c0c48bebeda1204c8ecb925, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7e31516382c54d47826573dcd28b8e34, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/b0db89da230945c784fc78f173970cb9] 2024-11-21T00:28:44,705 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33881 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:44,705 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/B is initiating minor compaction (all files) 2024-11-21T00:28:44,705 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/B in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:44,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,705 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b3659b66cc404d9880fb09df77abb5cf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e2f486e763ee4d3bb246a9abe749c478, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ece27a92523e4a18b4028c073d9035ce] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=33.1 K 2024-11-21T00:28:44,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,706 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b3659b66cc404d9880fb09df77abb5cf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732148920536 2024-11-21T00:28:44,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,706 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47db2b8b6c0c48bebeda1204c8ecb925, keycount=150, bloomtype=ROW, size=30.4 K, 
encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732148920536 2024-11-21T00:28:44,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,707 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e2f486e763ee4d3bb246a9abe749c478, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732148921220 2024-11-21T00:28:44,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,707 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e31516382c54d47826573dcd28b8e34, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732148921220 2024-11-21T00:28:44,707 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ece27a92523e4a18b4028c073d9035ce, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732148922616 2024-11-21T00:28:44,707 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0db89da230945c784fc78f173970cb9, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732148922616 2024-11-21T00:28:44,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,727 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized 
configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:44,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,740 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#B#compaction#328 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:44,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,740 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/5f32c95385dd41ea905ccc5922cb22d0 is 50, key is test_row_0/B:col10/1732148922643/Put/seqid=0 2024-11-21T00:28:44,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,744 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121ac871d7ea445431c938865be76e9b2ca_306fd645e20cdcec516bf24d0ab4894b store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:44,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,746 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121ac871d7ea445431c938865be76e9b2ca_306fd645e20cdcec516bf24d0ab4894b, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:44,746 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ac871d7ea445431c938865be76e9b2ca_306fd645e20cdcec516bf24d0ab4894b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:44,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742209_1385 (size=12325) 2024-11-21T00:28:44,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
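
The many "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" entries above and below suggest a configuration-driven factory that resolves a tracker implementation class by name on each request. Purely as an illustration of that shape (this is not the HBase StoreFileTrackerFactory source), a minimal reflection-based factory could look like the sketch below; the Tracker interface, the "tracker.impl" property, and DefaultTracker are hypothetical names invented for the example.

import java.util.Properties;

// Illustrative sketch only: config-driven, reflection-based factory.
// NOT the HBase StoreFileTrackerFactory; names here are assumptions.
public class TrackerFactorySketch {

    // Hypothetical tracker interface standing in for the real one.
    interface Tracker {
        String name();
    }

    // Hypothetical default implementation, used when the config names nothing else.
    public static class DefaultTracker implements Tracker {
        @Override
        public String name() {
            return "default";
        }
    }

    // Loads the implementation class named in the config, falling back to the default.
    static Tracker create(Properties conf) throws ReflectiveOperationException {
        String impl = conf.getProperty("tracker.impl", DefaultTracker.class.getName());
        return Class.forName(impl)
                .asSubclass(Tracker.class)
                .getDeclaredConstructor()
                .newInstance();
    }

    public static void main(String[] args) throws ReflectiveOperationException {
        System.out.println(create(new Properties()).name()); // prints "default"
    }
}
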
2024-11-21T00:28:44,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:44,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,778 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/5f32c95385dd41ea905ccc5922cb22d0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/5f32c95385dd41ea905ccc5922cb22d0 2024-11-21T00:28:44,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:28:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,783 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/B of 306fd645e20cdcec516bf24d0ab4894b into 5f32c95385dd41ea905ccc5922cb22d0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
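
The PressureAwareThroughputController entries in this section report the average compaction throughput, how many times the writer slept, and the shared limit of 50.00 MB/second. Purely as an illustration of the underlying idea (this is not the HBase controller), a minimal byte-budget limiter that sleeps when a writer gets ahead of its rate cap could look like the following sketch; the class name, method names, and accounting details are assumptions.

import java.util.concurrent.TimeUnit;

// Illustrative sketch only: a simple rate limiter in the spirit of the
// "average throughput ... slept N time(s) ... total limit is 50.00 MB/second"
// log entries. NOT the HBase PressureAwareThroughputController.
public class ThroughputLimiterSketch {

    private final double bytesPerSecondLimit;
    private final long startNanos = System.nanoTime();
    private long bytesWritten;
    private long totalSleptMillis;

    ThroughputLimiterSketch(double bytesPerSecondLimit) {
        this.bytesPerSecondLimit = bytesPerSecondLimit;
    }

    // Record bytes written and sleep just long enough to stay under the limit.
    void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double minimumSeconds = bytesWritten / bytesPerSecondLimit;
        long sleepMillis = (long) ((minimumSeconds - elapsedSeconds) * 1000);
        if (sleepMillis > 0) {
            totalSleptMillis += sleepMillis;
            TimeUnit.MILLISECONDS.sleep(sleepMillis);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s cap, as reported in the log; write 5 MB in 1 MB chunks.
        ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50 * 1024 * 1024);
        for (int i = 0; i < 5; i++) {
            limiter.control(1024 * 1024);
        }
        System.out.println("total slept time is " + limiter.totalSleptMillis + " ms");
    }
}
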
2024-11-21T00:28:44,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,783 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:44,783 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/B, priority=13, startTime=1732148924703; duration=0sec 2024-11-21T00:28:44,784 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:44,784 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:B 2024-11-21T00:28:44,784 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:44,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,785 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33881 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:44,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,785 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/C is initiating minor compaction (all files) 2024-11-21T00:28:44,785 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/C in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
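
The entries above show the compaction policy picking all 3 eligible store files ("3 files of size 33881 ... after considering 1 permutations with 1 in ratio"). As a rough, assumption-laden sketch of that kind of ratio-based selection (not the actual ExploringCompactionPolicy source), the following Java example scans contiguous windows of file sizes and keeps a window only if no single file dominates the rest by more than a ratio; the ratio of 1.2, the helper names, the min/max file counts, and the example sizes are illustrative.

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch only: simplified, ratio-based selection of a contiguous
// run of store files for compaction. NOT the HBase ExploringCompactionPolicy.
public class CompactionSelectionSketch {

    // Returns the best contiguous window of file sizes, or an empty list.
    static List<Long> selectFiles(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                List<Long> window = sizes.subList(start, end);
                if (!inRatio(window, ratio)) {
                    continue;
                }
                long total = window.stream().mapToLong(Long::longValue).sum();
                // Prefer more files, then the smaller total size (less rewrite cost).
                if (window.size() > best.size() || (window.size() == best.size() && total < bestTotal)) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    // A window is "in ratio" if no single file exceeds `ratio` times the sum of the others.
    static boolean inRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Illustrative sizes chosen to sum to the 33881 bytes reported in the log
        // for the three C-family files (~11.9 K, ~9.4 K, ~11.8 K).
        List<Long> sizes = List.of(12_185L, 9_626L, 12_070L);
        System.out.println("selected: " + selectFiles(sizes, 2, 10, 1.2));
    }
}
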
2024-11-21T00:28:44,785 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f1a25adfa4744068bc7099ca5eda5bd1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cac6213a975142419cfe3d93dbb61398, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/be1717b181fb4089802023b66f052810] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=33.1 K 2024-11-21T00:28:44,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,785 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f1a25adfa4744068bc7099ca5eda5bd1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732148920536 2024-11-21T00:28:44,786 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting cac6213a975142419cfe3d93dbb61398, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732148921220 2024-11-21T00:28:44,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,786 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting be1717b181fb4089802023b66f052810, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732148922616 2024-11-21T00:28:44,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742210_1386 (size=4469) 2024-11-21T00:28:44,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,806 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#A#compaction#327 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:44,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,807 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/35b414405163437bb3390cd7f55c90a6 is 175, key is test_row_0/A:col10/1732148922643/Put/seqid=0 2024-11-21T00:28:44,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,817 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#C#compaction#329 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:44,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,818 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/6b9deb97a0d943dba383c5c6b437e10d is 50, key is test_row_0/C:col10/1732148922643/Put/seqid=0 2024-11-21T00:28:44,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,821 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,825 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,828 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,830 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,835 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,840 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,847 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,854 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,860 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742211_1387 (size=31279) 2024-11-21T00:28:44,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742212_1388 (size=12325) 2024-11-21T00:28:44,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:44,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:28:44,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:44,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:44,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:44,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:44,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:44,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:44,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121318050e28ea34975bc6550c60779d8f3_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148924928/Put/seqid=0 2024-11-21T00:28:44,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742213_1389 (size=19774) 2024-11-21T00:28:44,970 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:44,976 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121318050e28ea34975bc6550c60779d8f3_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121318050e28ea34975bc6550c60779d8f3_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:44,977 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/05d8d7e9bb004ceaa8a53682f29aacdb, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:44,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/05d8d7e9bb004ceaa8a53682f29aacdb is 175, key is test_row_0/A:col10/1732148924928/Put/seqid=0 2024-11-21T00:28:44,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742214_1390 (size=57033) 2024-11-21T00:28:44,986 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=141, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/05d8d7e9bb004ceaa8a53682f29aacdb 2024-11-21T00:28:45,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/52e68e84b62642609b7423207ae551e6 is 50, key is test_row_0/B:col10/1732148924928/Put/seqid=0 2024-11-21T00:28:45,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742215_1391 (size=12151) 2024-11-21T00:28:45,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/52e68e84b62642609b7423207ae551e6 2024-11-21T00:28:45,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/0096c19f074b47998ef54fd64f7d7c02 is 50, key is test_row_0/C:col10/1732148924928/Put/seqid=0 2024-11-21T00:28:45,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148985044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148985045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148985047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148985048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742216_1392 (size=12151) 2024-11-21T00:28:45,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148985151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148985154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148985155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148985155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,266 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/35b414405163437bb3390cd7f55c90a6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/35b414405163437bb3390cd7f55c90a6 2024-11-21T00:28:45,271 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/A of 306fd645e20cdcec516bf24d0ab4894b into 35b414405163437bb3390cd7f55c90a6(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
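[Editorial aside, not part of the test output] The RegionTooBusyException entries above show the region server rejecting Mutate calls while the memstore of region 306fd645e20cdcec516bf24d0ab4894b is over its 512.0 K blocking limit; that limit is the configured flush size multiplied by hbase.hregion.memstore.block.multiplier, and HRegion.checkResources throws once the limit is crossed until the in-flight flush frees space. As a rough, hypothetical client-side sketch only (the table name TestAcidGuarantees, family A, row test_row_0 and qualifier col10 are taken from the log; everything else is assumed), the code below retries such a Put when the server answers with RegionTooBusyException. Note that the stock HBase client already retries this exception internally, so the explicit loop is purely didactic.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The "Over memstore limit=512.0 K" seen above is flush size x block multiplier;
    // these are the two knobs involved (values here are illustrative, not the test's):
    // conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);          // write accepted, stop retrying
          break;
        } catch (RegionTooBusyException e) {
          // Region is blocking writes until the flush frees memstore space; back off and retry.
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}

[End of aside; the test log continues below.]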
2024-11-21T00:28:45,271 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:45,271 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/A, priority=13, startTime=1732148924703; duration=0sec 2024-11-21T00:28:45,271 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:45,271 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:A 2024-11-21T00:28:45,292 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/6b9deb97a0d943dba383c5c6b437e10d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b9deb97a0d943dba383c5c6b437e10d 2024-11-21T00:28:45,295 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/C of 306fd645e20cdcec516bf24d0ab4894b into 6b9deb97a0d943dba383c5c6b437e10d(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:45,295 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:45,295 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/C, priority=13, startTime=1732148924703; duration=0sec 2024-11-21T00:28:45,295 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:45,295 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:C 2024-11-21T00:28:45,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148985357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148985358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148985360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148985361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/0096c19f074b47998ef54fd64f7d7c02 2024-11-21T00:28:45,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/05d8d7e9bb004ceaa8a53682f29aacdb as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/05d8d7e9bb004ceaa8a53682f29aacdb 2024-11-21T00:28:45,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/05d8d7e9bb004ceaa8a53682f29aacdb, entries=300, sequenceid=141, filesize=55.7 K 2024-11-21T00:28:45,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-21T00:28:45,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/52e68e84b62642609b7423207ae551e6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/52e68e84b62642609b7423207ae551e6 2024-11-21T00:28:45,465 INFO [Thread-1598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-21T00:28:45,466 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:45,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-21T00:28:45,468 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:45,469 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:45,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:45,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-21T00:28:45,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/52e68e84b62642609b7423207ae551e6, entries=150, sequenceid=141, filesize=11.9 K 2024-11-21T00:28:45,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/0096c19f074b47998ef54fd64f7d7c02 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/0096c19f074b47998ef54fd64f7d7c02 2024-11-21T00:28:45,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/0096c19f074b47998ef54fd64f7d7c02, entries=150, sequenceid=141, filesize=11.9 K 2024-11-21T00:28:45,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 306fd645e20cdcec516bf24d0ab4894b in 547ms, sequenceid=141, compaction requested=false 2024-11-21T00:28:45,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:45,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-21T00:28:45,620 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-21T00:28:45,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:45,621 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:28:45,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:45,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:45,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:45,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:45,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:45,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:45,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112163114e5430f64491901450715853877a_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148925033/Put/seqid=0 2024-11-21T00:28:45,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742217_1393 (size=12304) 2024-11-21T00:28:45,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:45,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:45,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148985672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148985670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148985679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148985679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-21T00:28:45,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148985781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148985781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148985788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148985790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148985986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148985986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:45,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148985993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148985996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:46,035 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112163114e5430f64491901450715853877a_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112163114e5430f64491901450715853877a_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:46,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/a7c9694db9ae4c10ba971bb99bdc8b0e, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:46,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/a7c9694db9ae4c10ba971bb99bdc8b0e is 175, key is test_row_0/A:col10/1732148925033/Put/seqid=0 2024-11-21T00:28:46,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742218_1394 (size=31105) 2024-11-21T00:28:46,040 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=168, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/a7c9694db9ae4c10ba971bb99bdc8b0e 2024-11-21T00:28:46,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/dacc4ae1f8a543c9a36ffd643d0d3e2e is 50, key is test_row_0/B:col10/1732148925033/Put/seqid=0 2024-11-21T00:28:46,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742219_1395 (size=12151) 2024-11-21T00:28:46,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-21T00:28:46,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148986293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148986295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148986299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148986303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,456 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/dacc4ae1f8a543c9a36ffd643d0d3e2e 2024-11-21T00:28:46,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/4768982e5f864ae789c7e276895ae5e4 is 50, key is test_row_0/C:col10/1732148925033/Put/seqid=0 2024-11-21T00:28:46,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742220_1396 (size=12151) 2024-11-21T00:28:46,465 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/4768982e5f864ae789c7e276895ae5e4 2024-11-21T00:28:46,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/a7c9694db9ae4c10ba971bb99bdc8b0e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a7c9694db9ae4c10ba971bb99bdc8b0e 2024-11-21T00:28:46,471 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a7c9694db9ae4c10ba971bb99bdc8b0e, entries=150, sequenceid=168, filesize=30.4 K 2024-11-21T00:28:46,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/dacc4ae1f8a543c9a36ffd643d0d3e2e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/dacc4ae1f8a543c9a36ffd643d0d3e2e 2024-11-21T00:28:46,475 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/dacc4ae1f8a543c9a36ffd643d0d3e2e, entries=150, sequenceid=168, filesize=11.9 K 2024-11-21T00:28:46,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/4768982e5f864ae789c7e276895ae5e4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4768982e5f864ae789c7e276895ae5e4 2024-11-21T00:28:46,478 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4768982e5f864ae789c7e276895ae5e4, entries=150, sequenceid=168, filesize=11.9 K 2024-11-21T00:28:46,479 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 306fd645e20cdcec516bf24d0ab4894b in 858ms, sequenceid=168, compaction requested=true 2024-11-21T00:28:46,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:46,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:46,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-21T00:28:46,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-21T00:28:46,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-21T00:28:46,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0110 sec 2024-11-21T00:28:46,482 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.0150 sec 2024-11-21T00:28:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-21T00:28:46,572 INFO [Thread-1598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-21T00:28:46,573 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:46,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-21T00:28:46,574 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:46,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-21T00:28:46,574 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:46,574 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:46,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:46,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-21T00:28:46,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:46,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:46,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:46,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-21T00:28:46,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:46,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:46,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-21T00:28:46,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411210b6d10622ff24f72a0b132080cbe39f0_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148926673/Put/seqid=0 2024-11-21T00:28:46,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742221_1397 (size=14794) 2024-11-21T00:28:46,725 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-21T00:28:46,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:46,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:46,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:46,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:46,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:46,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:46,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148986787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148986799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148986800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148986801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148986811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-21T00:28:46,878 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:46,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-21T00:28:46,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:46,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:46,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:46,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
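The RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking size, which is the configured flush size multiplied by the block multiplier. A hedged sketch of the two settings involved; the 128 KB x 4 values below are illustrative choices that reproduce the 512 K figure, not settings read from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // A region starts rejecting writes with RegionTooBusyException once its memstore
    // exceeds flush size * block multiplier; 128 KB * 4 = 512 KB matches the
    // "Over memstore limit=512.0 K" figure in the log (illustrative values only).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit = " + limit + " bytes");
  }
}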
2024-11-21T00:28:46,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:46,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:46,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:46,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148986891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,030 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-21T00:28:47,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:47,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:47,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:47,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:47,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:47,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:47,089 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:47,094 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411210b6d10622ff24f72a0b132080cbe39f0_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411210b6d10622ff24f72a0b132080cbe39f0_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:47,095 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/76e56e446bee4b79931de26343c0fef8, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:47,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/76e56e446bee4b79931de26343c0fef8 is 175, key is test_row_0/A:col10/1732148926673/Put/seqid=0 2024-11-21T00:28:47,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742222_1398 (size=39749) 2024-11-21T00:28:47,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148987097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,100 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=181, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/76e56e446bee4b79931de26343c0fef8 2024-11-21T00:28:47,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/499d7dca2bb245d5988eccf67f83d752 is 50, key is test_row_0/B:col10/1732148926673/Put/seqid=0 2024-11-21T00:28:47,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742223_1399 (size=12151) 2024-11-21T00:28:47,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/499d7dca2bb245d5988eccf67f83d752 2024-11-21T00:28:47,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/9af105fc458349cbb81dffe60813d82d is 50, key is test_row_0/C:col10/1732148926673/Put/seqid=0 2024-11-21T00:28:47,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742224_1400 (size=12151) 2024-11-21T00:28:47,176 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-21T00:28:47,183 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-21T00:28:47,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:47,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:47,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:47,183 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:47,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:47,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:47,338 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-21T00:28:47,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:47,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:47,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:47,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:47,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
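The mobdir paths and the HMobStore/DefaultMobStoreFlusher entries earlier in this flush (and the DefaultMobStoreCompactor entries further down) show that column family A of the test table is MOB-enabled. A minimal sketch of declaring such a family with the HBase 2.x descriptor builders; the threshold value is illustrative, not taken from the test:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableExample {
  static void createMobTable(Admin admin) throws Exception {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    // Values in family A larger than the threshold are written to separate MOB files
    // under the mobdir seen in the log, instead of inline in the region's store files.
    table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(100L)   // bytes; illustrative value
        .build());
    admin.createTable(table.build());
  }
}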
2024-11-21T00:28:47,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:47,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148987401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,491 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-21T00:28:47,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:47,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:47,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:47,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
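On the client side, writes rejected with RegionTooBusyException are normally retried by the HBase client's own retry machinery; the sketch below only illustrates the back-off idea for an application that catches the exception itself. Row, family, and qualifier mirror the entries above but are otherwise arbitrary:

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  /** Writes one cell, backing off and retrying while the region reports it is too busy. */
  static void putWithBackoff(Connection conn) throws Exception {
    Put put = new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    long sleepMs = 100;
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          Thread.sleep(sleepMs); // give the in-flight flush time to drain the memstore
          sleepMs *= 2;
        }
      }
    }
    throw new RuntimeException("region still too busy after retries");
  }
}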
2024-11-21T00:28:47,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:47,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:47,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/9af105fc458349cbb81dffe60813d82d 2024-11-21T00:28:47,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/76e56e446bee4b79931de26343c0fef8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/76e56e446bee4b79931de26343c0fef8 2024-11-21T00:28:47,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/76e56e446bee4b79931de26343c0fef8, entries=200, sequenceid=181, filesize=38.8 K 2024-11-21T00:28:47,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/499d7dca2bb245d5988eccf67f83d752 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/499d7dca2bb245d5988eccf67f83d752 2024-11-21T00:28:47,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/499d7dca2bb245d5988eccf67f83d752, entries=150, sequenceid=181, filesize=11.9 K 2024-11-21T00:28:47,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/9af105fc458349cbb81dffe60813d82d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/9af105fc458349cbb81dffe60813d82d 2024-11-21T00:28:47,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/9af105fc458349cbb81dffe60813d82d, entries=150, sequenceid=181, filesize=11.9 K 2024-11-21T00:28:47,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 306fd645e20cdcec516bf24d0ab4894b in 866ms, sequenceid=181, compaction requested=true 2024-11-21T00:28:47,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:47,540 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:47,541 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 159166 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:47,541 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/A is initiating minor compaction (all files) 2024-11-21T00:28:47,541 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/A in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:47,542 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/35b414405163437bb3390cd7f55c90a6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/05d8d7e9bb004ceaa8a53682f29aacdb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a7c9694db9ae4c10ba971bb99bdc8b0e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/76e56e446bee4b79931de26343c0fef8] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=155.4 K 2024-11-21T00:28:47,542 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:47,542 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/35b414405163437bb3390cd7f55c90a6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/05d8d7e9bb004ceaa8a53682f29aacdb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a7c9694db9ae4c10ba971bb99bdc8b0e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/76e56e446bee4b79931de26343c0fef8] 2024-11-21T00:28:47,542 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35b414405163437bb3390cd7f55c90a6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732148922616 2024-11-21T00:28:47,542 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05d8d7e9bb004ceaa8a53682f29aacdb, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1732148924800 2024-11-21T00:28:47,543 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7c9694db9ae4c10ba971bb99bdc8b0e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732148925033 2024-11-21T00:28:47,543 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76e56e446bee4b79931de26343c0fef8, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148925669 2024-11-21T00:28:47,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:47,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:47,549 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:47,550 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48778 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:47,550 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/B is initiating minor compaction (all files) 2024-11-21T00:28:47,550 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/B in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:47,550 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/5f32c95385dd41ea905ccc5922cb22d0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/52e68e84b62642609b7423207ae551e6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/dacc4ae1f8a543c9a36ffd643d0d3e2e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/499d7dca2bb245d5988eccf67f83d752] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=47.6 K 2024-11-21T00:28:47,551 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f32c95385dd41ea905ccc5922cb22d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732148922616 2024-11-21T00:28:47,552 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 52e68e84b62642609b7423207ae551e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1732148924858 2024-11-21T00:28:47,552 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting dacc4ae1f8a543c9a36ffd643d0d3e2e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732148925033 2024-11-21T00:28:47,553 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 499d7dca2bb245d5988eccf67f83d752, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148925676 2024-11-21T00:28:47,555 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:47,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:47,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:47,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:47,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:47,564 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121bdc54071f849484e99f1ba5b000bae08_306fd645e20cdcec516bf24d0ab4894b store=[table=TestAcidGuarantees family=A 
region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:47,566 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121bdc54071f849484e99f1ba5b000bae08_306fd645e20cdcec516bf24d0ab4894b, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:47,566 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121bdc54071f849484e99f1ba5b000bae08_306fd645e20cdcec516bf24d0ab4894b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:47,570 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#B#compaction#340 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:47,571 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/f450013fbd2242fdb4a5d4ddc59c64ac is 50, key is test_row_0/B:col10/1732148926673/Put/seqid=0 2024-11-21T00:28:47,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742226_1402 (size=12561) 2024-11-21T00:28:47,595 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/f450013fbd2242fdb4a5d4ddc59c64ac as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/f450013fbd2242fdb4a5d4ddc59c64ac 2024-11-21T00:28:47,599 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/B of 306fd645e20cdcec516bf24d0ab4894b into f450013fbd2242fdb4a5d4ddc59c64ac(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:47,599 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:47,599 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/B, priority=12, startTime=1732148927549; duration=0sec 2024-11-21T00:28:47,599 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:47,599 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:B 2024-11-21T00:28:47,599 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:47,600 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48778 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:47,600 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/C is initiating minor compaction (all files) 2024-11-21T00:28:47,600 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/C in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:47,600 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b9deb97a0d943dba383c5c6b437e10d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/0096c19f074b47998ef54fd64f7d7c02, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4768982e5f864ae789c7e276895ae5e4, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/9af105fc458349cbb81dffe60813d82d] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=47.6 K 2024-11-21T00:28:47,601 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b9deb97a0d943dba383c5c6b437e10d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732148922616 2024-11-21T00:28:47,601 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0096c19f074b47998ef54fd64f7d7c02, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1732148924858 2024-11-21T00:28:47,602 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 4768982e5f864ae789c7e276895ae5e4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=168, earliestPutTs=1732148925033 2024-11-21T00:28:47,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742225_1401 (size=4469) 2024-11-21T00:28:47,602 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 9af105fc458349cbb81dffe60813d82d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148925676 2024-11-21T00:28:47,611 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#C#compaction#341 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:47,611 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/35410e5fdcdd48f5804ee9d9e15d608d is 50, key is test_row_0/C:col10/1732148926673/Put/seqid=0 2024-11-21T00:28:47,614 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#A#compaction#339 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:47,615 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/951d320b47414ebca27a60ecec93d168 is 175, key is test_row_0/A:col10/1732148926673/Put/seqid=0 2024-11-21T00:28:47,649 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-21T00:28:47,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:47,650 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:28:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:47,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742227_1403 (size=12561) 2024-11-21T00:28:47,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742228_1404 (size=31515) 2024-11-21T00:28:47,669 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/35410e5fdcdd48f5804ee9d9e15d608d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/35410e5fdcdd48f5804ee9d9e15d608d 2024-11-21T00:28:47,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411210671b9189bd148e3b77358ac399e8051_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148926786/Put/seqid=0 2024-11-21T00:28:47,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-21T00:28:47,684 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/C of 306fd645e20cdcec516bf24d0ab4894b into 35410e5fdcdd48f5804ee9d9e15d608d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:47,684 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:47,684 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/C, priority=12, startTime=1732148927556; duration=0sec 2024-11-21T00:28:47,684 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:47,684 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:C 2024-11-21T00:28:47,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742229_1405 (size=12304) 2024-11-21T00:28:47,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:47,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:47,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148987833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148987838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148987839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148987842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148987910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148987944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148987948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148987948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:47,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:47,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148987951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,069 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/951d320b47414ebca27a60ecec93d168 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/951d320b47414ebca27a60ecec93d168 2024-11-21T00:28:48,073 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/A of 306fd645e20cdcec516bf24d0ab4894b into 951d320b47414ebca27a60ecec93d168(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:48,073 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:48,073 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/A, priority=12, startTime=1732148927540; duration=0sec 2024-11-21T00:28:48,073 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:48,073 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:A 2024-11-21T00:28:48,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:48,101 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411210671b9189bd148e3b77358ac399e8051_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411210671b9189bd148e3b77358ac399e8051_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:48,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/014fa380489c4fb89fc3dfae546e25bb, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:48,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/014fa380489c4fb89fc3dfae546e25bb is 175, key is test_row_0/A:col10/1732148926786/Put/seqid=0 2024-11-21T00:28:48,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742230_1406 (size=31105) 2024-11-21T00:28:48,135 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=205, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/014fa380489c4fb89fc3dfae546e25bb 2024-11-21T00:28:48,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148988150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148988158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148988159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148988166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ac1a4d0f0f2e4a8b83bad092b60396f0 is 50, key is test_row_0/B:col10/1732148926786/Put/seqid=0 2024-11-21T00:28:48,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742231_1407 (size=12151) 2024-11-21T00:28:48,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148988457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148988466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148988471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148988475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,591 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ac1a4d0f0f2e4a8b83bad092b60396f0 2024-11-21T00:28:48,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/d5a64934ac474fbab131338b10e6bea9 is 50, key is test_row_0/C:col10/1732148926786/Put/seqid=0 2024-11-21T00:28:48,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742232_1408 (size=12151) 2024-11-21T00:28:48,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-21T00:28:48,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148988917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148988963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148988970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148988980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:48,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:48,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148988987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:49,003 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/d5a64934ac474fbab131338b10e6bea9 2024-11-21T00:28:49,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/014fa380489c4fb89fc3dfae546e25bb as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/014fa380489c4fb89fc3dfae546e25bb 2024-11-21T00:28:49,017 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/014fa380489c4fb89fc3dfae546e25bb, entries=150, sequenceid=205, filesize=30.4 K 2024-11-21T00:28:49,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ac1a4d0f0f2e4a8b83bad092b60396f0 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ac1a4d0f0f2e4a8b83bad092b60396f0 2024-11-21T00:28:49,048 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ac1a4d0f0f2e4a8b83bad092b60396f0, entries=150, sequenceid=205, filesize=11.9 K 2024-11-21T00:28:49,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/d5a64934ac474fbab131338b10e6bea9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/d5a64934ac474fbab131338b10e6bea9 2024-11-21T00:28:49,053 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/d5a64934ac474fbab131338b10e6bea9, entries=150, sequenceid=205, filesize=11.9 K 2024-11-21T00:28:49,055 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 306fd645e20cdcec516bf24d0ab4894b in 1406ms, sequenceid=205, compaction requested=false 2024-11-21T00:28:49,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:49,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:49,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-21T00:28:49,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-21T00:28:49,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-21T00:28:49,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4820 sec 2024-11-21T00:28:49,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.4850 sec 2024-11-21T00:28:49,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:49,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-21T00:28:49,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:49,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:49,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:49,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:49,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:49,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:49,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ddc77e5576d04a3ba843ddfefcc08d8b_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148927838/Put/seqid=0 2024-11-21T00:28:49,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742233_1409 (size=14794) 2024-11-21T00:28:50,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148990005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148990006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148990007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148990008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148990110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148990111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148990111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148990111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148990317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148990318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148990319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148990319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,388 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:50,392 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ddc77e5576d04a3ba843ddfefcc08d8b_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ddc77e5576d04a3ba843ddfefcc08d8b_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:50,393 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/eb5b45598d91431eb7972c72f08196d6, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:50,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/eb5b45598d91431eb7972c72f08196d6 is 175, key is test_row_0/A:col10/1732148927838/Put/seqid=0 2024-11-21T00:28:50,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742234_1410 (size=39749) 2024-11-21T00:28:50,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148990627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148990628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148990628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:50,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148990628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-21T00:28:50,679 INFO [Thread-1598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-21T00:28:50,683 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:50,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-21T00:28:50,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-21T00:28:50,685 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:50,685 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:50,685 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:50,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-21T00:28:50,799 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=221, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/eb5b45598d91431eb7972c72f08196d6 2024-11-21T00:28:50,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/b8ada9b7d7894656b9c1e43624641564 is 50, key is test_row_0/B:col10/1732148927838/Put/seqid=0 
2024-11-21T00:28:50,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742235_1411 (size=12151) 2024-11-21T00:28:50,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/b8ada9b7d7894656b9c1e43624641564 2024-11-21T00:28:50,837 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-21T00:28:50,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:50,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:50,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:50,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:50,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:50,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:50,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/639cf602012449589aca946052083216 is 50, key is test_row_0/C:col10/1732148927838/Put/seqid=0 2024-11-21T00:28:50,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742236_1412 (size=12151) 2024-11-21T00:28:50,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/639cf602012449589aca946052083216 2024-11-21T00:28:50,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/eb5b45598d91431eb7972c72f08196d6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb5b45598d91431eb7972c72f08196d6 2024-11-21T00:28:50,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb5b45598d91431eb7972c72f08196d6, entries=200, sequenceid=221, filesize=38.8 K 2024-11-21T00:28:50,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/b8ada9b7d7894656b9c1e43624641564 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b8ada9b7d7894656b9c1e43624641564 2024-11-21T00:28:50,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b8ada9b7d7894656b9c1e43624641564, entries=150, sequenceid=221, filesize=11.9 K 2024-11-21T00:28:50,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/639cf602012449589aca946052083216 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/639cf602012449589aca946052083216 2024-11-21T00:28:50,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/639cf602012449589aca946052083216, entries=150, sequenceid=221, filesize=11.9 K 2024-11-21T00:28:50,912 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 306fd645e20cdcec516bf24d0ab4894b in 938ms, sequenceid=221, 
compaction requested=true 2024-11-21T00:28:50,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:50,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:50,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:50,912 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:50,912 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:50,914 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:50,914 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/B is initiating minor compaction (all files) 2024-11-21T00:28:50,915 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:50,915 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/A is initiating minor compaction (all files) 2024-11-21T00:28:50,915 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/B in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:50,915 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/A in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
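The SortedCompactionPolicy / ExploringCompactionPolicy entries just above ("Selecting compaction from 3 store files", "selected 3 files of size 36863 ... with 1 in ratio") refer to a ratio test over the candidate store files. As a rough illustration only, not the actual HBase implementation, the sketch below shows one common formulation of that test: a window of files is "in ratio" when no single file is larger than ratio times the combined size of the other files. The class name and the 1.2 ratio are example values, and the three file sizes are an approximate split of the 36863-byte total logged above.

```java
import java.util.List;

// Simplified sketch of an "in ratio" check for compaction candidates.
// Not HBase source; names and values are illustrative only.
public class RatioCheckSketch {

    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true; // a single file is trivially in ratio
        }
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            // Reject the window if this file dwarfs the rest of the selection.
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate split of the logged 36863-byte, 3-file selection.
        List<Long> window = List.of(12595L, 12151L, 12117L);
        System.out.println("in ratio: " + filesInRatio(window, 1.2)); // expected: true
    }
}
```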
2024-11-21T00:28:50,915 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/f450013fbd2242fdb4a5d4ddc59c64ac, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ac1a4d0f0f2e4a8b83bad092b60396f0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b8ada9b7d7894656b9c1e43624641564] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=36.0 K 2024-11-21T00:28:50,916 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/951d320b47414ebca27a60ecec93d168, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/014fa380489c4fb89fc3dfae546e25bb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb5b45598d91431eb7972c72f08196d6] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=100.0 K 2024-11-21T00:28:50,916 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:50,916 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/951d320b47414ebca27a60ecec93d168, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/014fa380489c4fb89fc3dfae546e25bb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb5b45598d91431eb7972c72f08196d6] 2024-11-21T00:28:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:50,916 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f450013fbd2242fdb4a5d4ddc59c64ac, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148925676 2024-11-21T00:28:50,916 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 951d320b47414ebca27a60ecec93d168, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148925676 2024-11-21T00:28:50,916 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ac1a4d0f0f2e4a8b83bad092b60396f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732148926778 2024-11-21T00:28:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:50,916 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b8ada9b7d7894656b9c1e43624641564, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148927831 2024-11-21T00:28:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:50,917 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 014fa380489c4fb89fc3dfae546e25bb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732148926778 2024-11-21T00:28:50,917 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb5b45598d91431eb7972c72f08196d6, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148927831 2024-11-21T00:28:50,932 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#B#compaction#348 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:50,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:50,933 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/87541d9273ac4fbc8b91b7a3e46af2f2 is 50, key is test_row_0/B:col10/1732148927838/Put/seqid=0 2024-11-21T00:28:50,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:28:50,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:50,935 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:50,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:50,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:50,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:50,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:50,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:50,951 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112135f3bc57c7cb440aaba0ef779f1c6c98_306fd645e20cdcec516bf24d0ab4894b store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:50,952 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112135f3bc57c7cb440aaba0ef779f1c6c98_306fd645e20cdcec516bf24d0ab4894b, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:50,952 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112135f3bc57c7cb440aaba0ef779f1c6c98_306fd645e20cdcec516bf24d0ab4894b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:50,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742237_1413 (size=12663) 2024-11-21T00:28:50,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ba7fe161081746ac81580527704a14eb_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148930003/Put/seqid=0 2024-11-21T00:28:50,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-21T00:28:50,988 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/87541d9273ac4fbc8b91b7a3e46af2f2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/87541d9273ac4fbc8b91b7a3e46af2f2 2024-11-21T00:28:50,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:50,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-21T00:28:50,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:50,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:50,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:50,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:50,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:50,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:50,993 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/B of 306fd645e20cdcec516bf24d0ab4894b into 87541d9273ac4fbc8b91b7a3e46af2f2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:50,993 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:50,993 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/B, priority=13, startTime=1732148930912; duration=0sec 2024-11-21T00:28:50,993 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:50,993 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:B 2024-11-21T00:28:50,993 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:50,994 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:50,994 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/C is initiating minor compaction (all files) 2024-11-21T00:28:50,994 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/C in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
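The repeated "Committing .../.tmp/... as ..." entries in this stretch show flush and compaction output being staged under the region's .tmp directory and then moved into the column-family directory. The sketch below is a minimal illustration of that write-to-temp-then-rename pattern using the plain Hadoop FileSystem API; the paths, file contents, and class name are hypothetical and this is not the HRegionFileSystem code itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Stage output in a .tmp directory, then publish it with a single rename.
// Illustrative only; paths and contents are made up.
public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf); // local filesystem by default

        Path tmpFile = new Path("target/region-demo/.tmp/B/storefile-0001");   // staging location
        Path committed = new Path("target/region-demo/B/storefile-0001");      // final store location

        fs.mkdirs(tmpFile.getParent());
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.writeBytes("fake store file contents"); // stand-in for HFile bytes
        }

        fs.mkdirs(committed.getParent());
        // The "commit": the file only appears at its final path once fully written.
        boolean renamed = fs.rename(tmpFile, committed);
        System.out.println("committed=" + renamed + " -> " + committed);
    }
}
```

Staging and renaming in this way means readers of the store directory never observe a partially written file, which is the point of the commit step logged above.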
2024-11-21T00:28:50,994 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/35410e5fdcdd48f5804ee9d9e15d608d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/d5a64934ac474fbab131338b10e6bea9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/639cf602012449589aca946052083216] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=36.0 K 2024-11-21T00:28:50,995 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 35410e5fdcdd48f5804ee9d9e15d608d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148925676 2024-11-21T00:28:50,996 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting d5a64934ac474fbab131338b10e6bea9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732148926778 2024-11-21T00:28:50,996 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 639cf602012449589aca946052083216, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148927831 2024-11-21T00:28:50,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742238_1414 (size=4469) 2024-11-21T00:28:51,003 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#C#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:51,004 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/4de57c16950149d5ae18b7071aeea71b is 50, key is test_row_0/C:col10/1732148927838/Put/seqid=0 2024-11-21T00:28:51,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742239_1415 (size=12304) 2024-11-21T00:28:51,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742240_1416 (size=12663) 2024-11-21T00:28:51,011 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/4de57c16950149d5ae18b7071aeea71b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4de57c16950149d5ae18b7071aeea71b 2024-11-21T00:28:51,014 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/C of 306fd645e20cdcec516bf24d0ab4894b into 4de57c16950149d5ae18b7071aeea71b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:51,014 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:51,014 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/C, priority=13, startTime=1732148930916; duration=0sec 2024-11-21T00:28:51,015 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:51,015 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:C 2024-11-21T00:28:51,020 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:51,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148991015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:51,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148991121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:51,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148991134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:51,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148991136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:51,143 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148991137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-21T00:28:51,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:51,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:51,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:51,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:51,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148991139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
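The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") indicate the region is rejecting writes while flushes and compactions catch up. The sketch below shows how a caller might back off and retry in that situation. It assumes the standard HBase 2.x client API; the table, row, and column names are taken from the log, the retry count and sleep times are arbitrary example values, and in practice the HBase client normally retries such failures itself and may surface them wrapped in a retries-exhausted exception rather than directly as shown here.

```java
import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative retry-with-backoff around a write to a busy region.
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    System.out.println("write succeeded on attempt " + attempt);
                    return;
                } catch (RegionTooBusyException e) {
                    // Region is blocking writes while its memstore drains; wait and retry.
                    System.out.println("region busy, retrying in " + backoffMs + " ms");
                    Thread.sleep(backoffMs);
                    backoffMs *= 2; // exponential backoff
                }
            }
            throw new IOException("region still busy after retries");
        }
    }
}
```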
2024-11-21T00:28:51,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-21T00:28:51,295 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-21T00:28:51,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:51,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:51,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:51,296 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:51,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148991329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,400 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#A#compaction#349 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:51,401 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/db014d4dec524387a98b41e764ad02e5 is 175, key is test_row_0/A:col10/1732148927838/Put/seqid=0 2024-11-21T00:28:51,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742241_1417 (size=31617) 2024-11-21T00:28:51,405 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:51,409 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ba7fe161081746ac81580527704a14eb_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ba7fe161081746ac81580527704a14eb_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:51,409 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/1bdc5b93686b4e5ea5cd07b4752c142c, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:51,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/1bdc5b93686b4e5ea5cd07b4752c142c is 175, key is test_row_0/A:col10/1732148930003/Put/seqid=0 2024-11-21T00:28:51,414 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742242_1418 (size=31105) 2024-11-21T00:28:51,448 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-21T00:28:51,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:51,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:51,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:51,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,600 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-21T00:28:51,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:51,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:51,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:51,601 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:51,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:51,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148991636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,752 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-21T00:28:51,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:51,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:51,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:51,753 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:51,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:51,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-21T00:28:51,808 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/db014d4dec524387a98b41e764ad02e5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/db014d4dec524387a98b41e764ad02e5 2024-11-21T00:28:51,811 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/A of 306fd645e20cdcec516bf24d0ab4894b into db014d4dec524387a98b41e764ad02e5(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:51,812 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:51,812 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/A, priority=13, startTime=1732148930912; duration=0sec 2024-11-21T00:28:51,812 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:51,812 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:A 2024-11-21T00:28:51,814 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=244, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/1bdc5b93686b4e5ea5cd07b4752c142c 2024-11-21T00:28:51,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/4da9e9261e754cd3a3d46cfc9d044685 is 50, key is test_row_0/B:col10/1732148930003/Put/seqid=0 2024-11-21T00:28:51,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742243_1419 (size=12151) 2024-11-21T00:28:51,825 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/4da9e9261e754cd3a3d46cfc9d044685 2024-11-21T00:28:51,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/3026674cf59a4f929fd733896c8ff4aa is 50, key is test_row_0/C:col10/1732148930003/Put/seqid=0 2024-11-21T00:28:51,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742244_1420 (size=12151) 2024-11-21T00:28:51,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/3026674cf59a4f929fd733896c8ff4aa 2024-11-21T00:28:51,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/1bdc5b93686b4e5ea5cd07b4752c142c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/1bdc5b93686b4e5ea5cd07b4752c142c 2024-11-21T00:28:51,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/1bdc5b93686b4e5ea5cd07b4752c142c, entries=150, sequenceid=244, filesize=30.4 K 2024-11-21T00:28:51,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/4da9e9261e754cd3a3d46cfc9d044685 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/4da9e9261e754cd3a3d46cfc9d044685 2024-11-21T00:28:51,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/4da9e9261e754cd3a3d46cfc9d044685, entries=150, sequenceid=244, filesize=11.9 K 2024-11-21T00:28:51,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/3026674cf59a4f929fd733896c8ff4aa as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/3026674cf59a4f929fd733896c8ff4aa 2024-11-21T00:28:51,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/3026674cf59a4f929fd733896c8ff4aa, entries=150, sequenceid=244, filesize=11.9 K 2024-11-21T00:28:51,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 306fd645e20cdcec516bf24d0ab4894b in 921ms, sequenceid=244, compaction requested=false 2024-11-21T00:28:51,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:51,905 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:51,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-21T00:28:51,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:51,905 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-21T00:28:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:51,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411215b1eca282b5f49cbb0c8126b3ca29ada_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148931014/Put/seqid=0 2024-11-21T00:28:51,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742245_1421 (size=12354) 2024-11-21T00:28:52,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:52,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:52,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148992198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148992204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148992206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148992206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148992206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148992306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:52,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148992311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148992312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148992312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148992313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,319 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411215b1eca282b5f49cbb0c8126b3ca29ada_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411215b1eca282b5f49cbb0c8126b3ca29ada_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:52,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/adaeb36367204513ad72bbf28510d962, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:52,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/adaeb36367204513ad72bbf28510d962 is 175, key is test_row_0/A:col10/1732148931014/Put/seqid=0 2024-11-21T00:28:52,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742246_1422 (size=31155) 2024-11-21T00:28:52,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148992511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148992516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148992517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148992517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148992517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,724 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=260, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/adaeb36367204513ad72bbf28510d962 2024-11-21T00:28:52,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/0dc7f5473a5d42c0b838412cf9e0bc5c is 50, key is test_row_0/B:col10/1732148931014/Put/seqid=0 2024-11-21T00:28:52,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742247_1423 (size=12201) 2024-11-21T00:28:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-21T00:28:52,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148992814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148992821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148992822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148992822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:52,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148992823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:53,135 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/0dc7f5473a5d42c0b838412cf9e0bc5c 2024-11-21T00:28:53,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/f8834170bd4c4a39802bbcf711229055 is 50, key is test_row_0/C:col10/1732148931014/Put/seqid=0 2024-11-21T00:28:53,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742248_1424 (size=12201) 2024-11-21T00:28:53,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:53,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148993325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:53,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:53,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148993325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:53,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:53,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148993327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:53,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:53,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148993329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:53,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:53,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148993329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:53,545 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/f8834170bd4c4a39802bbcf711229055 2024-11-21T00:28:53,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/adaeb36367204513ad72bbf28510d962 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/adaeb36367204513ad72bbf28510d962 2024-11-21T00:28:53,553 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/adaeb36367204513ad72bbf28510d962, entries=150, sequenceid=260, filesize=30.4 K 2024-11-21T00:28:53,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/0dc7f5473a5d42c0b838412cf9e0bc5c as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/0dc7f5473a5d42c0b838412cf9e0bc5c 2024-11-21T00:28:53,559 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/0dc7f5473a5d42c0b838412cf9e0bc5c, entries=150, sequenceid=260, filesize=11.9 K 2024-11-21T00:28:53,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/f8834170bd4c4a39802bbcf711229055 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f8834170bd4c4a39802bbcf711229055 2024-11-21T00:28:53,563 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f8834170bd4c4a39802bbcf711229055, entries=150, sequenceid=260, filesize=11.9 K 2024-11-21T00:28:53,565 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 306fd645e20cdcec516bf24d0ab4894b in 1659ms, sequenceid=260, compaction requested=true 2024-11-21T00:28:53,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:53,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:53,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-21T00:28:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-21T00:28:53,567 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-21T00:28:53,567 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8810 sec 2024-11-21T00:28:53,568 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.8840 sec 2024-11-21T00:28:54,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:54,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:28:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:54,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411210c46aabf0fea4369bd444dfe6ad5255f_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148932205/Put/seqid=0 2024-11-21T00:28:54,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742249_1425 (size=14994) 2024-11-21T00:28:54,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148994343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148994344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148994344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148994347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148994348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148994450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148994450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148994450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148994451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148994454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148994654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148994654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148994655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148994655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148994660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,747 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:54,750 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411210c46aabf0fea4369bd444dfe6ad5255f_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411210c46aabf0fea4369bd444dfe6ad5255f_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:54,751 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/62cbe6a3809f497d8513f6c412d7b818, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:54,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/62cbe6a3809f497d8513f6c412d7b818 is 175, key is test_row_0/A:col10/1732148932205/Put/seqid=0 2024-11-21T00:28:54,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742250_1426 (size=39949) 2024-11-21T00:28:54,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=114 2024-11-21T00:28:54,792 INFO [Thread-1598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-21T00:28:54,793 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:54,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-21T00:28:54,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-21T00:28:54,794 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:54,795 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:54,795 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:54,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-21T00:28:54,946 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-21T00:28:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:54,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:54,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:54,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148994960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148994962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148994962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148994963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:54,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:54,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148994965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-21T00:28:55,100 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-21T00:28:55,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:55,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:55,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:55,101 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:55,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:55,176 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=285, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/62cbe6a3809f497d8513f6c412d7b818 2024-11-21T00:28:55,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/cac36877aa374648b6cbb5bd78ec9c34 is 50, key is test_row_0/B:col10/1732148932205/Put/seqid=0 2024-11-21T00:28:55,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742251_1427 (size=12301) 2024-11-21T00:28:55,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/cac36877aa374648b6cbb5bd78ec9c34 2024-11-21T00:28:55,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/6b22238dd5c349c0a0a7b33909d9cb49 is 50, key is test_row_0/C:col10/1732148932205/Put/seqid=0 2024-11-21T00:28:55,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742252_1428 (size=12301) 2024-11-21T00:28:55,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), 
to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/6b22238dd5c349c0a0a7b33909d9cb49 2024-11-21T00:28:55,253 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-21T00:28:55,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:55,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:55,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:55,254 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:55,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:55,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
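The entries above show the master repeatedly re-dispatching FlushRegionProcedure pid=117 to 0e7930017ff8,37961 while the region reports it is already flushing, and concurrent Mutate calls being rejected with RegionTooBusyException once the memstore crosses its 512.0 K blocking limit. A minimal client-side sketch of how a writer might back off and retry under that condition follows; the table name, row and column mirror the log, while the retry count, backoff values, and the assumption that the exception surfaces to the caller (rather than being absorbed by the client's own retries) are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;                  // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                     // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);            // give the in-progress flush time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}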
2024-11-21T00:28:55,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/62cbe6a3809f497d8513f6c412d7b818 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/62cbe6a3809f497d8513f6c412d7b818 2024-11-21T00:28:55,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/62cbe6a3809f497d8513f6c412d7b818, entries=200, sequenceid=285, filesize=39.0 K 2024-11-21T00:28:55,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/cac36877aa374648b6cbb5bd78ec9c34 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/cac36877aa374648b6cbb5bd78ec9c34 2024-11-21T00:28:55,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/cac36877aa374648b6cbb5bd78ec9c34, entries=150, sequenceid=285, filesize=12.0 K 2024-11-21T00:28:55,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/6b22238dd5c349c0a0a7b33909d9cb49 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b22238dd5c349c0a0a7b33909d9cb49 2024-11-21T00:28:55,282 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b22238dd5c349c0a0a7b33909d9cb49, entries=150, sequenceid=285, filesize=12.0 K 2024-11-21T00:28:55,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 306fd645e20cdcec516bf24d0ab4894b in 949ms, sequenceid=285, compaction requested=true 2024-11-21T00:28:55,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:55,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:55,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:55,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:55,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): 
Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:55,283 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:55,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:55,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:28:55,283 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:55,287 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49316 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:55,287 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133826 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:55,287 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/A is initiating minor compaction (all files) 2024-11-21T00:28:55,287 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/B is initiating minor compaction (all files) 2024-11-21T00:28:55,287 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/A in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:55,287 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/B in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:55,288 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/db014d4dec524387a98b41e764ad02e5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/1bdc5b93686b4e5ea5cd07b4752c142c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/adaeb36367204513ad72bbf28510d962, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/62cbe6a3809f497d8513f6c412d7b818] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=130.7 K 2024-11-21T00:28:55,288 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/87541d9273ac4fbc8b91b7a3e46af2f2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/4da9e9261e754cd3a3d46cfc9d044685, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/0dc7f5473a5d42c0b838412cf9e0bc5c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/cac36877aa374648b6cbb5bd78ec9c34] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=48.2 K 2024-11-21T00:28:55,288 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:55,288 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/db014d4dec524387a98b41e764ad02e5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/1bdc5b93686b4e5ea5cd07b4752c142c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/adaeb36367204513ad72bbf28510d962, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/62cbe6a3809f497d8513f6c412d7b818] 2024-11-21T00:28:55,288 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87541d9273ac4fbc8b91b7a3e46af2f2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148927831 2024-11-21T00:28:55,288 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting db014d4dec524387a98b41e764ad02e5, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148927831 2024-11-21T00:28:55,289 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4da9e9261e754cd3a3d46cfc9d044685, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732148930003 2024-11-21T00:28:55,289 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bdc5b93686b4e5ea5cd07b4752c142c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732148930003 2024-11-21T00:28:55,289 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dc7f5473a5d42c0b838412cf9e0bc5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732148930989 2024-11-21T00:28:55,289 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting cac36877aa374648b6cbb5bd78ec9c34, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732148932196 2024-11-21T00:28:55,289 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting adaeb36367204513ad72bbf28510d962, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732148930989 2024-11-21T00:28:55,290 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 62cbe6a3809f497d8513f6c412d7b818, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732148932196 2024-11-21T00:28:55,321 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#B#compaction#360 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:55,322 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/261d7998859b469096d02abfcceef51d is 50, key is test_row_0/B:col10/1732148932205/Put/seqid=0 2024-11-21T00:28:55,323 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:55,343 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411217c9baaa831664924b4606e8cd878debe_306fd645e20cdcec516bf24d0ab4894b store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:55,346 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411217c9baaa831664924b4606e8cd878debe_306fd645e20cdcec516bf24d0ab4894b, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:55,346 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411217c9baaa831664924b4606e8cd878debe_306fd645e20cdcec516bf24d0ab4894b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:55,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742253_1429 (size=12949) 2024-11-21T00:28:55,393 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/261d7998859b469096d02abfcceef51d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/261d7998859b469096d02abfcceef51d 2024-11-21T00:28:55,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-21T00:28:55,398 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/B of 306fd645e20cdcec516bf24d0ab4894b into 261d7998859b469096d02abfcceef51d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
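The compaction entries above record the ExploringCompactionPolicy selecting all four eligible B-family store files and rewriting them into a single 12.6 K file under the 50 MB/s throughput controller. Where the same work should be requested explicitly instead of waiting for a flush-triggered selection, the Admin API can queue it; a minimal sketch, with the table and family names taken from the log and everything else left at defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table, Bytes.toBytes("B"));  // queue a minor compaction for family B only
      admin.majorCompact(table);                 // or rewrite every store file in the table
    }
  }
}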
2024-11-21T00:28:55,398 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:55,398 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/B, priority=12, startTime=1732148935283; duration=0sec 2024-11-21T00:28:55,399 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:55,399 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:B 2024-11-21T00:28:55,399 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:28:55,400 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49316 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:28:55,400 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/C is initiating minor compaction (all files) 2024-11-21T00:28:55,400 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/C in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:55,400 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4de57c16950149d5ae18b7071aeea71b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/3026674cf59a4f929fd733896c8ff4aa, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f8834170bd4c4a39802bbcf711229055, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b22238dd5c349c0a0a7b33909d9cb49] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=48.2 K 2024-11-21T00:28:55,401 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4de57c16950149d5ae18b7071aeea71b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148927831 2024-11-21T00:28:55,402 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3026674cf59a4f929fd733896c8ff4aa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732148930003 2024-11-21T00:28:55,403 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8834170bd4c4a39802bbcf711229055, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732148930989 2024-11-21T00:28:55,403 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b22238dd5c349c0a0a7b33909d9cb49, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732148932196 2024-11-21T00:28:55,411 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-21T00:28:55,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:55,411 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:28:55,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:55,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:55,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:55,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:55,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:55,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:55,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742254_1430 (size=4469) 2024-11-21T00:28:55,435 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#C#compaction#362 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:55,436 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/c5aa9a0539ff4dc8a70501df56b3e253 is 50, key is test_row_0/C:col10/1732148932205/Put/seqid=0 2024-11-21T00:28:55,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112192c6d32eed964c09ae2079461f375daa_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148934347/Put/seqid=0 2024-11-21T00:28:55,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:55,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:55,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742255_1431 (size=12949) 2024-11-21T00:28:55,499 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/c5aa9a0539ff4dc8a70501df56b3e253 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/c5aa9a0539ff4dc8a70501df56b3e253 2024-11-21T00:28:55,503 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/C of 306fd645e20cdcec516bf24d0ab4894b into c5aa9a0539ff4dc8a70501df56b3e253(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
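The DefaultMobStoreFlusher and DefaultMobStoreCompactor entries above appear because column family A is MOB-enabled, so flushed cells above the MOB threshold are written under the mobdir tree rather than into the regular store files (and the compactor aborts its MOB writer when no cells qualify, as logged). A sketch of declaring such a family when creating the table; the table and family names mirror the log, while the 100 KB threshold is an assumed value, not one taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)               // large cells for family A go to MOB files under /mobdir
          .setMobThreshold(100 * 1024L)      // assumed threshold: cells larger than ~100 KB become MOBs
          .build());
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}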
2024-11-21T00:28:55,504 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:55,504 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/C, priority=12, startTime=1732148935283; duration=0sec 2024-11-21T00:28:55,504 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:55,504 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:C 2024-11-21T00:28:55,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742256_1432 (size=12454) 2024-11-21T00:28:55,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:55,534 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112192c6d32eed964c09ae2079461f375daa_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112192c6d32eed964c09ae2079461f375daa_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:55,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/f3950d56f8714154a035c705ec689d53, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:55,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/f3950d56f8714154a035c705ec689d53 is 175, key is test_row_0/A:col10/1732148934347/Put/seqid=0 2024-11-21T00:28:55,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742257_1433 (size=31255) 2024-11-21T00:28:55,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148995595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148995604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148995610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148995612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148995619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148995721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148995721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148995723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148995728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148995729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,826 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#A#compaction#361 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:55,826 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/648b2f0e345748d796950c8dfcfe9d29 is 175, key is test_row_0/A:col10/1732148932205/Put/seqid=0 2024-11-21T00:28:55,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742258_1434 (size=31903) 2024-11-21T00:28:55,835 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/648b2f0e345748d796950c8dfcfe9d29 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/648b2f0e345748d796950c8dfcfe9d29 2024-11-21T00:28:55,840 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/A of 306fd645e20cdcec516bf24d0ab4894b into 648b2f0e345748d796950c8dfcfe9d29(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:55,840 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:55,840 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/A, priority=12, startTime=1732148935283; duration=0sec 2024-11-21T00:28:55,840 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:55,840 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:A 2024-11-21T00:28:55,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-21T00:28:55,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148995929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148995931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148995934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148995936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:55,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148995936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:55,972 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=297, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/f3950d56f8714154a035c705ec689d53 2024-11-21T00:28:55,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/bc5f849550dd4b9d81ff98c1c29afd0a is 50, key is test_row_0/B:col10/1732148934347/Put/seqid=0 2024-11-21T00:28:56,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742259_1435 (size=12301) 2024-11-21T00:28:56,022 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/bc5f849550dd4b9d81ff98c1c29afd0a 2024-11-21T00:28:56,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/1fcd3b1b4c5d433abd9b2f44a0130f56 is 50, key is 
test_row_0/C:col10/1732148934347/Put/seqid=0 2024-11-21T00:28:56,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742260_1436 (size=12301) 2024-11-21T00:28:56,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148996234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148996238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148996238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148996241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148996241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,483 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/1fcd3b1b4c5d433abd9b2f44a0130f56 2024-11-21T00:28:56,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/f3950d56f8714154a035c705ec689d53 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f3950d56f8714154a035c705ec689d53 2024-11-21T00:28:56,540 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f3950d56f8714154a035c705ec689d53, entries=150, sequenceid=297, filesize=30.5 K 2024-11-21T00:28:56,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/bc5f849550dd4b9d81ff98c1c29afd0a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bc5f849550dd4b9d81ff98c1c29afd0a 2024-11-21T00:28:56,577 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bc5f849550dd4b9d81ff98c1c29afd0a, entries=150, sequenceid=297, filesize=12.0 K 2024-11-21T00:28:56,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/1fcd3b1b4c5d433abd9b2f44a0130f56 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/1fcd3b1b4c5d433abd9b2f44a0130f56 2024-11-21T00:28:56,603 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/1fcd3b1b4c5d433abd9b2f44a0130f56, entries=150, sequenceid=297, filesize=12.0 K 2024-11-21T00:28:56,605 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 306fd645e20cdcec516bf24d0ab4894b in 1194ms, sequenceid=297, compaction requested=false 2024-11-21T00:28:56,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:56,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:56,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-21T00:28:56,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-21T00:28:56,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-21T00:28:56,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8110 sec 2024-11-21T00:28:56,609 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.8150 sec 2024-11-21T00:28:56,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:56,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-21T00:28:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:56,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:56,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121b52279034ce34effb7495e6161c454bc_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148935612/Put/seqid=0 2024-11-21T00:28:56,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148996751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148996758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148996760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148996761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148996761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742261_1437 (size=14994) 2024-11-21T00:28:56,785 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:56,789 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121b52279034ce34effb7495e6161c454bc_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b52279034ce34effb7495e6161c454bc_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:56,789 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/d46d4e8231274dc380786ecf2613c029, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:56,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/d46d4e8231274dc380786ecf2613c029 is 175, key is test_row_0/A:col10/1732148935612/Put/seqid=0 2024-11-21T00:28:56,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742262_1438 (size=39949) 2024-11-21T00:28:56,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148996862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148996863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148996870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148996873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:56,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148996873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:56,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-21T00:28:56,903 INFO [Thread-1598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-21T00:28:56,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:28:56,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-21T00:28:56,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-21T00:28:56,916 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:28:56,917 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:28:56,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:28:57,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-21T00:28:57,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148997065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,080 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:57,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:57,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148997075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148997078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148997080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148997080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-21T00:28:57,224 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=325, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/d46d4e8231274dc380786ecf2613c029 2024-11-21T00:28:57,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/e032bea22f3d4718a3b430264dc867b4 is 50, key is test_row_0/B:col10/1732148935612/Put/seqid=0 2024-11-21T00:28:57,235 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:57,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:57,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
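
The RegionTooBusyException records above all originate in HRegion.checkResources refusing puts because the region's memstore is over its blocking limit (512.0 K in this run). As a hedged aside, not part of the log: that limit is the product of two standard HBase settings, sketched below with their stock defaults rather than whatever small flush size this test actually configured.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Defaults shown for illustration; the 512 K limit in this log implies the test
    // used a much smaller flush size for TestAcidGuarantees.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // default 128 MB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);             // default 4
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    // Writes to a region are rejected with RegionTooBusyException once its memstore
    // exceeds flush.size * block.multiplier, which is the "Over memstore limit" seen above.
    System.out.println("writes block above ~" + blockingLimit + " bytes per region");
  }
}
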
2024-11-21T00:28:57,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742263_1439 (size=12301) 2024-11-21T00:28:57,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/e032bea22f3d4718a3b430264dc867b4 2024-11-21T00:28:57,298 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
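
The FlushTableProcedure entries (pid=118) and the repeated "Unable to complete flush ... as already flushing" failures above are the server side of an admin-initiated table flush, which the test drives through HBaseAdmin ("Operation: FLUSH, Table Name: default:TestAcidGuarantees"). A minimal client-side sketch of that call follows; connection setup is left to defaults and only the table name mirrors the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a FlushTableProcedure for the table. A region that is
      // already mid-flush answers "NOT flushing ... as already flushing", and the
      // dispatched FlushRegionCallable fails and is retried, as recorded above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
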
2024-11-21T00:28:57,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/479b09aea4a9432581ad26d0340c9202 is 50, key is test_row_0/C:col10/1732148935612/Put/seqid=0 2024-11-21T00:28:57,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742264_1440 (size=12301) 2024-11-21T00:28:57,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/479b09aea4a9432581ad26d0340c9202 2024-11-21T00:28:57,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/d46d4e8231274dc380786ecf2613c029 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/d46d4e8231274dc380786ecf2613c029 2024-11-21T00:28:57,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/d46d4e8231274dc380786ecf2613c029, entries=200, sequenceid=325, filesize=39.0 K 2024-11-21T00:28:57,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/e032bea22f3d4718a3b430264dc867b4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e032bea22f3d4718a3b430264dc867b4 2024-11-21T00:28:57,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e032bea22f3d4718a3b430264dc867b4, entries=150, sequenceid=325, filesize=12.0 K 2024-11-21T00:28:57,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/479b09aea4a9432581ad26d0340c9202 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/479b09aea4a9432581ad26d0340c9202 2024-11-21T00:28:57,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148997372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/479b09aea4a9432581ad26d0340c9202, entries=150, sequenceid=325, filesize=12.0 K 2024-11-21T00:28:57,388 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:57,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:57,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 306fd645e20cdcec516bf24d0ab4894b in 650ms, sequenceid=325, compaction requested=true 2024-11-21T00:28:57,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:57,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:28:57,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:57,391 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:57,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:57,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:57,391 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:57,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:57,391 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:57,392 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-21T00:28:57,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:57,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:57,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:57,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:57,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:57,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:57,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:57,400 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:57,400 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:57,400 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/B is initiating minor compaction (all files) 2024-11-21T00:28:57,400 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/A is initiating minor compaction (all files) 2024-11-21T00:28:57,400 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/A in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:57,400 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/648b2f0e345748d796950c8dfcfe9d29, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f3950d56f8714154a035c705ec689d53, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/d46d4e8231274dc380786ecf2613c029] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=100.7 K 2024-11-21T00:28:57,400 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,400 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/648b2f0e345748d796950c8dfcfe9d29, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f3950d56f8714154a035c705ec689d53, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/d46d4e8231274dc380786ecf2613c029] 2024-11-21T00:28:57,400 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/B in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
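
The minor compactions being selected here ("Selecting compaction from 3 store files ... 3 eligible") fire once a store accumulates the minimum file count for compaction. A small illustration of the relevant settings, shown with their stock defaults rather than this test's values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionDefaults {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);   // minimum eligible files before a minor compaction (default 3)
    conf.setInt("hbase.hstore.compaction.max", 10);  // upper bound on files merged in one compaction (default 10)
    System.out.println("minor compaction considered after "
        + conf.getInt("hbase.hstore.compaction.min", 3) + " store files");
  }
}
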
2024-11-21T00:28:57,400 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/261d7998859b469096d02abfcceef51d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bc5f849550dd4b9d81ff98c1c29afd0a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e032bea22f3d4718a3b430264dc867b4] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=36.7 K 2024-11-21T00:28:57,403 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 648b2f0e345748d796950c8dfcfe9d29, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732148932196 2024-11-21T00:28:57,403 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 261d7998859b469096d02abfcceef51d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732148932196 2024-11-21T00:28:57,404 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting bc5f849550dd4b9d81ff98c1c29afd0a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732148934340 2024-11-21T00:28:57,404 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3950d56f8714154a035c705ec689d53, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732148934340 2024-11-21T00:28:57,404 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e032bea22f3d4718a3b430264dc867b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732148935610 2024-11-21T00:28:57,404 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting d46d4e8231274dc380786ecf2613c029, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732148935599 2024-11-21T00:28:57,419 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:57,428 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#B#compaction#370 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:57,429 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/1da22f74bf444a36bafb8fc48d35bcae is 50, key is test_row_0/B:col10/1732148935612/Put/seqid=0 2024-11-21T00:28:57,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112161e02f9f9f2044508dc430a85628a83e_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148936750/Put/seqid=0 2024-11-21T00:28:57,434 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121cd3c86e0fd9c4541b0652416e428d444_306fd645e20cdcec516bf24d0ab4894b store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:57,436 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121cd3c86e0fd9c4541b0652416e428d444_306fd645e20cdcec516bf24d0ab4894b, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:57,437 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121cd3c86e0fd9c4541b0652416e428d444_306fd645e20cdcec516bf24d0ab4894b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:57,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148997447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148997448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148997449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148997451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742265_1441 (size=13051) 2024-11-21T00:28:57,489 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/1da22f74bf444a36bafb8fc48d35bcae as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/1da22f74bf444a36bafb8fc48d35bcae 2024-11-21T00:28:57,497 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/B of 306fd645e20cdcec516bf24d0ab4894b into 1da22f74bf444a36bafb8fc48d35bcae(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
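The RegionTooBusyException entries above come from HRegion.checkResources rejecting puts while the region's memstore sits over its blocking limit (512.0 K in this test run); the writer is expected to pause until the in-flight flush and compactions drain the store. A minimal client-side sketch of that backoff follows, assuming a standard HBase Java client; the table, row, family and qualifier names are illustrative only, and the client library already retries such failures internally, so this merely makes the waiting explicit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         // Table, row, family and qualifier are illustrative, not taken from the test.
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);  // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // The region server asked us to slow down; wait for the flush to drain the memstore.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}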
2024-11-21T00:28:57,497 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:57,497 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/B, priority=13, startTime=1732148937391; duration=0sec 2024-11-21T00:28:57,497 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:57,497 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:B 2024-11-21T00:28:57,497 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:57,499 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:57,500 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/C is initiating minor compaction (all files) 2024-11-21T00:28:57,500 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/C in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,500 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/c5aa9a0539ff4dc8a70501df56b3e253, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/1fcd3b1b4c5d433abd9b2f44a0130f56, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/479b09aea4a9432581ad26d0340c9202] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=36.7 K 2024-11-21T00:28:57,500 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c5aa9a0539ff4dc8a70501df56b3e253, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732148932196 2024-11-21T00:28:57,504 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fcd3b1b4c5d433abd9b2f44a0130f56, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732148934340 2024-11-21T00:28:57,506 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 479b09aea4a9432581ad26d0340c9202, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732148935610 2024-11-21T00:28:57,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 
is added to blk_1073742267_1443 (size=4469) 2024-11-21T00:28:57,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-21T00:28:57,510 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#A#compaction#369 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:57,511 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/14e8dde8d0304266b8c8daa194214974 is 175, key is test_row_0/A:col10/1732148935612/Put/seqid=0 2024-11-21T00:28:57,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742266_1442 (size=17534) 2024-11-21T00:28:57,512 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:57,519 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112161e02f9f9f2044508dc430a85628a83e_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112161e02f9f9f2044508dc430a85628a83e_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:57,520 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/0cd05d781f8143bd942d1dfd4c7578d9, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:57,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/0cd05d781f8143bd942d1dfd4c7578d9 is 175, key is test_row_0/A:col10/1732148936750/Put/seqid=0 2024-11-21T00:28:57,540 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:57,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
as already flushing 2024-11-21T00:28:57,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,545 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#C#compaction#372 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:57,548 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/cb8a80bc1c0846da8b559b5fe46b6b5f is 50, key is test_row_0/C:col10/1732148935612/Put/seqid=0 2024-11-21T00:28:57,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148997559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148997560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148997560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148997560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742269_1445 (size=48639) 2024-11-21T00:28:57,578 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=338, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/0cd05d781f8143bd942d1dfd4c7578d9 2024-11-21T00:28:57,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742268_1444 (size=32005) 2024-11-21T00:28:57,613 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/14e8dde8d0304266b8c8daa194214974 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/14e8dde8d0304266b8c8daa194214974 2024-11-21T00:28:57,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ba655ac015e740c38c18562c5d6dcf35 is 50, key is test_row_0/B:col10/1732148936750/Put/seqid=0 2024-11-21T00:28:57,622 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/A of 306fd645e20cdcec516bf24d0ab4894b into 14e8dde8d0304266b8c8daa194214974(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
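The compaction decisions recorded above (SortedCompactionPolicy selecting from 3 store files, ExploringCompactionPolicy picking all 3, 16 files blocking) are driven by per-store configuration. A hedged sketch of the usual knobs follows; the keys are standard HBase settings, but the values shown are only illustrative and are not necessarily the ones this test run actually used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  // Returns a Configuration carrying the store-compaction knobs that drive the
  // ExploringCompactionPolicy selections seen in the log above (values illustrative).
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // fewest files a minor compaction may pick
    conf.setInt("hbase.hstore.compaction.max", 10);       // most files a single compaction may pick
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // matches the "16 blocking" figure above
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used when exploring permutations
    return conf;
  }
}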
2024-11-21T00:28:57,623 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:57,624 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/A, priority=13, startTime=1732148937391; duration=0sec 2024-11-21T00:28:57,624 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:57,624 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:A 2024-11-21T00:28:57,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742270_1446 (size=13051) 2024-11-21T00:28:57,651 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/cb8a80bc1c0846da8b559b5fe46b6b5f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cb8a80bc1c0846da8b559b5fe46b6b5f 2024-11-21T00:28:57,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742271_1447 (size=12301) 2024-11-21T00:28:57,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ba655ac015e740c38c18562c5d6dcf35 2024-11-21T00:28:57,667 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/C of 306fd645e20cdcec516bf24d0ab4894b into cb8a80bc1c0846da8b559b5fe46b6b5f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
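The throughput figures above ("average throughput is ... total limit is 50.00 MB/second") come from the pressure-aware compaction throughput controller. The sketch below shows how such a limit could be configured; the property names are given as I understand them for recent HBase 2.x releases and should be treated as assumptions to verify, and the 50/25 MB/s values are merely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static Configuration throttledConf() {
    Configuration conf = HBaseConfiguration.create();
    // Upper and lower bounds for the pressure-aware compaction throughput controller;
    // the upper bound mirrors the "total limit is 50.00 MB/second" messages above.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
    return conf;
  }
}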
2024-11-21T00:28:57,667 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:57,667 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/C, priority=13, startTime=1732148937391; duration=0sec 2024-11-21T00:28:57,667 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:57,667 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:C 2024-11-21T00:28:57,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/5aaa6a5adee54e548cec5cbd00824a1b is 50, key is test_row_0/C:col10/1732148936750/Put/seqid=0 2024-11-21T00:28:57,693 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:57,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:57,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
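The pid=119 failures above show the master-driven flush procedure bouncing off a region that is "already flushing": FlushRegionCallable raises an IOException, the region server reports the failure, and the dispatcher tries again later, which is why the same procedure id reappears throughout this section. From the client side such a flush is normally requested through the Admin API, as in the hedged sketch below (table name illustrative).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush procedure for the table; as the log above suggests,
      // a region that is already flushing rejects the callable and the master retries
      // until the in-flight flush completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}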
2024-11-21T00:28:57,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742272_1448 (size=12301) 2024-11-21T00:28:57,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/5aaa6a5adee54e548cec5cbd00824a1b 2024-11-21T00:28:57,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/0cd05d781f8143bd942d1dfd4c7578d9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/0cd05d781f8143bd942d1dfd4c7578d9 2024-11-21T00:28:57,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/0cd05d781f8143bd942d1dfd4c7578d9, entries=250, sequenceid=338, filesize=47.5 K 2024-11-21T00:28:57,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ba655ac015e740c38c18562c5d6dcf35 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ba655ac015e740c38c18562c5d6dcf35 2024-11-21T00:28:57,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ba655ac015e740c38c18562c5d6dcf35, entries=150, sequenceid=338, filesize=12.0 K 2024-11-21T00:28:57,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/5aaa6a5adee54e548cec5cbd00824a1b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/5aaa6a5adee54e548cec5cbd00824a1b 2024-11-21T00:28:57,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/5aaa6a5adee54e548cec5cbd00824a1b, entries=150, sequenceid=338, filesize=12.0 K 2024-11-21T00:28:57,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 306fd645e20cdcec516bf24d0ab4894b in 379ms, sequenceid=338, compaction requested=false 2024-11-21T00:28:57,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:57,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:57,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-21T00:28:57,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:57,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:57,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:57,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:57,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:57,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:57,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148997795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148997800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148997800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148997807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121001d05261737462491f18c03eb94c184_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148937780/Put/seqid=0 2024-11-21T00:28:57,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742273_1449 (size=14994) 2024-11-21T00:28:57,855 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:57,859 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:57,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:57,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:57,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:57,869 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121001d05261737462491f18c03eb94c184_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121001d05261737462491f18c03eb94c184_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:57,870 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/9c35dc3deb814687948c492e7e71b072, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:57,871 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/9c35dc3deb814687948c492e7e71b072 is 175, key is test_row_0/A:col10/1732148937780/Put/seqid=0 2024-11-21T00:28:57,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148997880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742274_1450 (size=39949) 2024-11-21T00:28:57,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148997910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148997911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148997911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:57,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:57,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148997917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-21T00:28:58,013 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:58,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:58,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,014 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:58,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148998114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148998115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148998115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148998125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,160 DEBUG [Thread-1599 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b660061 to 127.0.0.1:64241 2024-11-21T00:28:58,160 DEBUG [Thread-1599 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:58,163 DEBUG [Thread-1603 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x28dc77ab to 127.0.0.1:64241 2024-11-21T00:28:58,164 DEBUG [Thread-1603 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:58,166 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,167 DEBUG [Thread-1601 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45ad0ff5 to 127.0.0.1:64241 2024-11-21T00:28:58,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:58,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:58,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:58,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,168 DEBUG [Thread-1601 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:58,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,168 DEBUG [Thread-1605 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70304ef6 to 127.0.0.1:64241 2024-11-21T00:28:58,168 DEBUG [Thread-1605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:58,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,171 DEBUG [Thread-1607 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f8ea360 to 127.0.0.1:64241 2024-11-21T00:28:58,171 DEBUG [Thread-1607 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:58,291 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=365, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/9c35dc3deb814687948c492e7e71b072 2024-11-21T00:28:58,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/2d093004ab6b4be8a797c9e99281cfd4 is 50, key is test_row_0/B:col10/1732148937780/Put/seqid=0 2024-11-21T00:28:58,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742275_1451 (size=12301) 2024-11-21T00:28:58,322 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:58,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
2024-11-21T00:28:58,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:58,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:58,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148998421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148998422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148998423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148998430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,475 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:58,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:58,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:58,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,633 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,633 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:58,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:58,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,634 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:58,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/2d093004ab6b4be8a797c9e99281cfd4 2024-11-21T00:28:58,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/47da6b65650b4ca794bfb8ab0634247b is 50, key is test_row_0/C:col10/1732148937780/Put/seqid=0 2024-11-21T00:28:58,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742276_1452 (size=12301) 2024-11-21T00:28:58,786 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:58,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:58,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,786 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:58,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45732 deadline: 1732148998894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45720 deadline: 1732148998924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45770 deadline: 1732148998925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45784 deadline: 1732148998927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:28:58,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45730 deadline: 1732148998934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,938 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:58,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:58,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:58,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:58,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:58,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:58,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:59,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-21T00:28:59,091 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:59,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:59,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:59,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:59,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:59,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:59,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:28:59,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:28:59,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/47da6b65650b4ca794bfb8ab0634247b 2024-11-21T00:28:59,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/9c35dc3deb814687948c492e7e71b072 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9c35dc3deb814687948c492e7e71b072 2024-11-21T00:28:59,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9c35dc3deb814687948c492e7e71b072, entries=200, sequenceid=365, filesize=39.0 K 2024-11-21T00:28:59,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/2d093004ab6b4be8a797c9e99281cfd4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/2d093004ab6b4be8a797c9e99281cfd4 2024-11-21T00:28:59,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/2d093004ab6b4be8a797c9e99281cfd4, entries=150, sequenceid=365, filesize=12.0 K 2024-11-21T00:28:59,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/47da6b65650b4ca794bfb8ab0634247b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/47da6b65650b4ca794bfb8ab0634247b 2024-11-21T00:28:59,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/47da6b65650b4ca794bfb8ab0634247b, entries=150, sequenceid=365, filesize=12.0 K 2024-11-21T00:28:59,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 306fd645e20cdcec516bf24d0ab4894b in 1366ms, sequenceid=365, compaction requested=true 2024-11-21T00:28:59,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:59,147 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:59,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-21T00:28:59,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:59,147 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:59,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:28:59,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:59,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 306fd645e20cdcec516bf24d0ab4894b:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:28:59,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:59,148 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 120593 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:59,148 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/A is initiating minor compaction (all files) 2024-11-21T00:28:59,148 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/A in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:59,149 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/14e8dde8d0304266b8c8daa194214974, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/0cd05d781f8143bd942d1dfd4c7578d9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9c35dc3deb814687948c492e7e71b072] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=117.8 K 2024-11-21T00:28:59,149 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:59,149 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/14e8dde8d0304266b8c8daa194214974, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/0cd05d781f8143bd942d1dfd4c7578d9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9c35dc3deb814687948c492e7e71b072] 2024-11-21T00:28:59,149 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:59,149 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/B is initiating minor compaction (all files) 2024-11-21T00:28:59,149 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14e8dde8d0304266b8c8daa194214974, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732148935610 2024-11-21T00:28:59,149 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/B in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:59,149 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/1da22f74bf444a36bafb8fc48d35bcae, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ba655ac015e740c38c18562c5d6dcf35, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/2d093004ab6b4be8a797c9e99281cfd4] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=36.8 K 2024-11-21T00:28:59,149 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0cd05d781f8143bd942d1dfd4c7578d9, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1732148936750 2024-11-21T00:28:59,150 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c35dc3deb814687948c492e7e71b072, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732148937441 2024-11-21T00:28:59,150 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1da22f74bf444a36bafb8fc48d35bcae, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732148935610 2024-11-21T00:28:59,150 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ba655ac015e740c38c18562c5d6dcf35, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1732148936750 2024-11-21T00:28:59,151 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 
2d093004ab6b4be8a797c9e99281cfd4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732148937441 2024-11-21T00:28:59,173 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:59,173 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#B#compaction#378 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:59,173 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/fee1fb484edd43a9af71bd21447bc4e8 is 50, key is test_row_0/B:col10/1732148937780/Put/seqid=0 2024-11-21T00:28:59,186 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121c53f14f2dfe04a4cad97d36603fb4e46_306fd645e20cdcec516bf24d0ab4894b store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:59,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742277_1453 (size=13153) 2024-11-21T00:28:59,221 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/fee1fb484edd43a9af71bd21447bc4e8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/fee1fb484edd43a9af71bd21447bc4e8 2024-11-21T00:28:59,226 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/B of 306fd645e20cdcec516bf24d0ab4894b into fee1fb484edd43a9af71bd21447bc4e8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:59,226 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:59,226 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/B, priority=13, startTime=1732148939147; duration=0sec 2024-11-21T00:28:59,226 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:28:59,226 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:B 2024-11-21T00:28:59,226 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:28:59,227 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:28:59,227 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 306fd645e20cdcec516bf24d0ab4894b/C is initiating minor compaction (all files) 2024-11-21T00:28:59,227 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 306fd645e20cdcec516bf24d0ab4894b/C in TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:59,227 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cb8a80bc1c0846da8b559b5fe46b6b5f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/5aaa6a5adee54e548cec5cbd00824a1b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/47da6b65650b4ca794bfb8ab0634247b] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp, totalSize=36.8 K 2024-11-21T00:28:59,227 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting cb8a80bc1c0846da8b559b5fe46b6b5f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732148935610 2024-11-21T00:28:59,228 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5aaa6a5adee54e548cec5cbd00824a1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1732148936750 2024-11-21T00:28:59,228 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 47da6b65650b4ca794bfb8ab0634247b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732148937441 2024-11-21T00:28:59,237 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort 
size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121c53f14f2dfe04a4cad97d36603fb4e46_306fd645e20cdcec516bf24d0ab4894b, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:59,237 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121c53f14f2dfe04a4cad97d36603fb4e46_306fd645e20cdcec516bf24d0ab4894b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:59,243 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:28:59,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-21T00:28:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:28:59,244 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:28:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:28:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:28:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:28:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:28:59,245 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#C#compaction#380 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:59,246 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/819e51a88a134792849c31c8c7260f04 is 50, key is test_row_0/C:col10/1732148937780/Put/seqid=0 2024-11-21T00:28:59,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742278_1454 (size=4469) 2024-11-21T00:28:59,282 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 306fd645e20cdcec516bf24d0ab4894b#A#compaction#379 average throughput is 0.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:28:59,283 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/ec58172a2c234fa0ae67ad2d68314f37 is 175, key is test_row_0/A:col10/1732148937780/Put/seqid=0 2024-11-21T00:28:59,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742279_1455 (size=13153) 2024-11-21T00:28:59,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411211c422ea1a6b14defae8cbdc39586356a_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148937805/Put/seqid=0 2024-11-21T00:28:59,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742280_1456 (size=32107) 2024-11-21T00:28:59,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742281_1457 (size=12454) 2024-11-21T00:28:59,362 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/ec58172a2c234fa0ae67ad2d68314f37 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/ec58172a2c234fa0ae67ad2d68314f37 2024-11-21T00:28:59,369 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/A of 306fd645e20cdcec516bf24d0ab4894b into ec58172a2c234fa0ae67ad2d68314f37(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:28:59,369 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:59,369 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/A, priority=13, startTime=1732148939147; duration=0sec 2024-11-21T00:28:59,369 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:59,369 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:A 2024-11-21T00:28:59,707 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/819e51a88a134792849c31c8c7260f04 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/819e51a88a134792849c31c8c7260f04 2024-11-21T00:28:59,711 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 306fd645e20cdcec516bf24d0ab4894b/C of 306fd645e20cdcec516bf24d0ab4894b into 819e51a88a134792849c31c8c7260f04(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:28:59,711 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:28:59,712 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b., storeName=306fd645e20cdcec516bf24d0ab4894b/C, priority=13, startTime=1732148939148; duration=0sec 2024-11-21T00:28:59,712 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:28:59,712 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 306fd645e20cdcec516bf24d0ab4894b:C 2024-11-21T00:28:59,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:59,767 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411211c422ea1a6b14defae8cbdc39586356a_306fd645e20cdcec516bf24d0ab4894b to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411211c422ea1a6b14defae8cbdc39586356a_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:59,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/eb7dd2ae618c4e2497dbeff885db6d54, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:28:59,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/eb7dd2ae618c4e2497dbeff885db6d54 is 175, key is test_row_0/A:col10/1732148937805/Put/seqid=0 2024-11-21T00:28:59,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742282_1458 (size=31255) 2024-11-21T00:28:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:28:59,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. as already flushing 2024-11-21T00:28:59,934 DEBUG [Thread-1596 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c0ec341 to 127.0.0.1:64241 2024-11-21T00:28:59,934 DEBUG [Thread-1596 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:59,935 DEBUG [Thread-1588 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1157d18a to 127.0.0.1:64241 2024-11-21T00:28:59,936 DEBUG [Thread-1588 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:59,937 DEBUG [Thread-1592 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77b5b03d to 127.0.0.1:64241 2024-11-21T00:28:59,937 DEBUG [Thread-1592 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:59,945 DEBUG [Thread-1590 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x353bcb3d to 127.0.0.1:64241 2024-11-21T00:28:59,945 DEBUG [Thread-1590 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:00,176 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=378, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/eb7dd2ae618c4e2497dbeff885db6d54 2024-11-21T00:29:00,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ba1d5372619c4425adbac0509e145c41 is 50, key is test_row_0/B:col10/1732148937805/Put/seqid=0 2024-11-21T00:29:00,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39227 is added to blk_1073742283_1459 (size=12301) 2024-11-21T00:29:00,586 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ba1d5372619c4425adbac0509e145c41 2024-11-21T00:29:00,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/97294a12ad074bf092e5b86dcf2e7cba is 50, key is test_row_0/C:col10/1732148937805/Put/seqid=0 2024-11-21T00:29:00,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742284_1460 (size=12301) 2024-11-21T00:29:00,596 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/97294a12ad074bf092e5b86dcf2e7cba 2024-11-21T00:29:00,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/eb7dd2ae618c4e2497dbeff885db6d54 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb7dd2ae618c4e2497dbeff885db6d54 2024-11-21T00:29:00,604 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb7dd2ae618c4e2497dbeff885db6d54, entries=150, sequenceid=378, filesize=30.5 K 2024-11-21T00:29:00,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/ba1d5372619c4425adbac0509e145c41 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ba1d5372619c4425adbac0509e145c41 2024-11-21T00:29:00,609 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ba1d5372619c4425adbac0509e145c41, entries=150, sequenceid=378, filesize=12.0 K 2024-11-21T00:29:00,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/97294a12ad074bf092e5b86dcf2e7cba as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/97294a12ad074bf092e5b86dcf2e7cba 2024-11-21T00:29:00,614 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/97294a12ad074bf092e5b86dcf2e7cba, entries=150, sequenceid=378, filesize=12.0 K 2024-11-21T00:29:00,614 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=26.84 KB/27480 for 306fd645e20cdcec516bf24d0ab4894b in 1370ms, sequenceid=378, compaction requested=false 2024-11-21T00:29:00,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:29:00,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:29:00,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-21T00:29:00,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-21T00:29:00,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-21T00:29:00,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6990 sec 2024-11-21T00:29:00,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 3.7130 sec 2024-11-21T00:29:00,910 DEBUG [Thread-1594 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x15bd9063 to 127.0.0.1:64241 2024-11-21T00:29:00,910 DEBUG [Thread-1594 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:01,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-21T00:29:01,013 INFO [Thread-1598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
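
[editor's note] The procedure pair that finishes just above (pid=118 FlushTableProcedure with its pid=119 FlushRegionProcedure subprocedure) is the master-side result of an administrative flush request against TestAcidGuarantees. Below is a minimal, hedged sketch of issuing such a request with the public HBase client API; the class name is illustrative and the configuration is assumed to come from an hbase-site.xml on the classpath — it is not code from the test tool itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml / ZooKeeper quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The master stores a flush procedure and fans out per-region flush
          // subprocedures to the hosting region servers, as logged above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
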
Writers: 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1831 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5493 rows 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1823 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5469 rows 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1842 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5526 rows 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1832 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5496 rows 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1838 2024-11-21T00:29:01,013 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5514 rows 2024-11-21T00:29:01,013 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-21T00:29:01,013 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04f9fed4 to 127.0.0.1:64241 2024-11-21T00:29:01,013 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:01,016 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-21T00:29:01,016 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-21T00:29:01,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:01,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-21T00:29:01,019 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148941019"}]},"ts":"1732148941019"} 2024-11-21T00:29:01,021 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-21T00:29:01,046 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-21T00:29:01,047 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-21T00:29:01,048 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, UNASSIGN}] 2024-11-21T00:29:01,050 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, UNASSIGN 2024-11-21T00:29:01,050 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=306fd645e20cdcec516bf24d0ab4894b, regionState=CLOSING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:01,051 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T00:29:01,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; CloseRegionProcedure 306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:29:01,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-21T00:29:01,202 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:01,203 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(124): Close 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:01,203 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-21T00:29:01,203 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1681): Closing 306fd645e20cdcec516bf24d0ab4894b, disabling compactions & flushes 2024-11-21T00:29:01,203 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:29:01,203 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:29:01,203 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. after waiting 0 ms 2024-11-21T00:29:01,203 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 
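
[editor's note] The entries above show a client disable request (pid=120 DisableTableProcedure) expanding into CloseTableRegionsProcedure (pid=121), an unassign TransitRegionStateProcedure (pid=122), and finally CloseRegionProcedure (pid=123) on the hosting region server 0e7930017ff8,37961. A hedged sketch of the client-side call that starts this chain, using only the public Admin API (the class name is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.isTableEnabled(table)) {
            // Blocks until the master's disable procedure, and the region close
            // subprocedures it spawns, have completed.
            admin.disableTable(table);
          }
        }
      }
    }
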
2024-11-21T00:29:01,203 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(2837): Flushing 306fd645e20cdcec516bf24d0ab4894b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-21T00:29:01,203 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=A 2024-11-21T00:29:01,203 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:01,203 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=B 2024-11-21T00:29:01,203 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:01,203 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 306fd645e20cdcec516bf24d0ab4894b, store=C 2024-11-21T00:29:01,204 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:01,208 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ad5f78acbf7b4d909388927dde9ca6de_306fd645e20cdcec516bf24d0ab4894b is 50, key is test_row_0/A:col10/1732148939943/Put/seqid=0 2024-11-21T00:29:01,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742285_1461 (size=12454) 2024-11-21T00:29:01,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-21T00:29:01,612 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:01,615 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121ad5f78acbf7b4d909388927dde9ca6de_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ad5f78acbf7b4d909388927dde9ca6de_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:01,615 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/a9002caf320b4953a072869105f7f62c, store: [table=TestAcidGuarantees family=A region=306fd645e20cdcec516bf24d0ab4894b] 2024-11-21T00:29:01,616 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/a9002caf320b4953a072869105f7f62c is 175, key is test_row_0/A:col10/1732148939943/Put/seqid=0 2024-11-21T00:29:01,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742286_1462 (size=31255) 2024-11-21T00:29:01,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-21T00:29:02,020 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=388, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/a9002caf320b4953a072869105f7f62c 2024-11-21T00:29:02,026 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/f3e801955f19476c8dec7e1bb31298af is 50, key is test_row_0/B:col10/1732148939943/Put/seqid=0 2024-11-21T00:29:02,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742287_1463 (size=12301) 2024-11-21T00:29:02,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-21T00:29:02,447 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/f3e801955f19476c8dec7e1bb31298af 2024-11-21T00:29:02,456 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/1a8874e3ef31481c9b9328d0d59d9a93 is 50, key is test_row_0/C:col10/1732148939943/Put/seqid=0 2024-11-21T00:29:02,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742288_1464 (size=12301) 2024-11-21T00:29:02,786 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9a7c5d7865a14b5aba30538f98548504, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7a401054d7fb4f688d99537dc4696f96, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f4413317717f4524862dd42b0dc562e5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/2ad49fea1e564710a91e9948e8c5ccce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/47db2b8b6c0c48bebeda1204c8ecb925, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/dac64c5490f344f1bd0ae7f1f9fc1d28, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7e31516382c54d47826573dcd28b8e34, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/b0db89da230945c784fc78f173970cb9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/35b414405163437bb3390cd7f55c90a6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/05d8d7e9bb004ceaa8a53682f29aacdb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a7c9694db9ae4c10ba971bb99bdc8b0e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/76e56e446bee4b79931de26343c0fef8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/951d320b47414ebca27a60ecec93d168, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/014fa380489c4fb89fc3dfae546e25bb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb5b45598d91431eb7972c72f08196d6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/db014d4dec524387a98b41e764ad02e5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/1bdc5b93686b4e5ea5cd07b4752c142c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/adaeb36367204513ad72bbf28510d962, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/62cbe6a3809f497d8513f6c412d7b818, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/648b2f0e345748d796950c8dfcfe9d29, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f3950d56f8714154a035c705ec689d53, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/d46d4e8231274dc380786ecf2613c029, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/14e8dde8d0304266b8c8daa194214974, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/0cd05d781f8143bd942d1dfd4c7578d9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9c35dc3deb814687948c492e7e71b072] to archive 2024-11-21T00:29:02,787 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:29:02,789 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9a7c5d7865a14b5aba30538f98548504 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9a7c5d7865a14b5aba30538f98548504 2024-11-21T00:29:02,790 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7a401054d7fb4f688d99537dc4696f96 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7a401054d7fb4f688d99537dc4696f96 2024-11-21T00:29:02,791 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f4413317717f4524862dd42b0dc562e5 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f4413317717f4524862dd42b0dc562e5 2024-11-21T00:29:02,792 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/2ad49fea1e564710a91e9948e8c5ccce to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/2ad49fea1e564710a91e9948e8c5ccce 2024-11-21T00:29:02,793 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/47db2b8b6c0c48bebeda1204c8ecb925 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/47db2b8b6c0c48bebeda1204c8ecb925 2024-11-21T00:29:02,794 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/dac64c5490f344f1bd0ae7f1f9fc1d28 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/dac64c5490f344f1bd0ae7f1f9fc1d28 2024-11-21T00:29:02,796 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7e31516382c54d47826573dcd28b8e34 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/7e31516382c54d47826573dcd28b8e34 2024-11-21T00:29:02,797 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/b0db89da230945c784fc78f173970cb9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/b0db89da230945c784fc78f173970cb9 2024-11-21T00:29:02,798 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/35b414405163437bb3390cd7f55c90a6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/35b414405163437bb3390cd7f55c90a6 2024-11-21T00:29:02,799 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/05d8d7e9bb004ceaa8a53682f29aacdb to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/05d8d7e9bb004ceaa8a53682f29aacdb 2024-11-21T00:29:02,800 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a7c9694db9ae4c10ba971bb99bdc8b0e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a7c9694db9ae4c10ba971bb99bdc8b0e 2024-11-21T00:29:02,803 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/76e56e446bee4b79931de26343c0fef8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/76e56e446bee4b79931de26343c0fef8 2024-11-21T00:29:02,804 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/951d320b47414ebca27a60ecec93d168 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/951d320b47414ebca27a60ecec93d168 2024-11-21T00:29:02,808 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/014fa380489c4fb89fc3dfae546e25bb to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/014fa380489c4fb89fc3dfae546e25bb 2024-11-21T00:29:02,809 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb5b45598d91431eb7972c72f08196d6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb5b45598d91431eb7972c72f08196d6 2024-11-21T00:29:02,810 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/db014d4dec524387a98b41e764ad02e5 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/db014d4dec524387a98b41e764ad02e5 2024-11-21T00:29:02,811 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/1bdc5b93686b4e5ea5cd07b4752c142c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/1bdc5b93686b4e5ea5cd07b4752c142c 2024-11-21T00:29:02,813 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/adaeb36367204513ad72bbf28510d962 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/adaeb36367204513ad72bbf28510d962 2024-11-21T00:29:02,814 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/62cbe6a3809f497d8513f6c412d7b818 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/62cbe6a3809f497d8513f6c412d7b818 2024-11-21T00:29:02,819 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/648b2f0e345748d796950c8dfcfe9d29 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/648b2f0e345748d796950c8dfcfe9d29 2024-11-21T00:29:02,823 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f3950d56f8714154a035c705ec689d53 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/f3950d56f8714154a035c705ec689d53 2024-11-21T00:29:02,825 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/d46d4e8231274dc380786ecf2613c029 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/d46d4e8231274dc380786ecf2613c029 2024-11-21T00:29:02,826 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/14e8dde8d0304266b8c8daa194214974 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/14e8dde8d0304266b8c8daa194214974 2024-11-21T00:29:02,827 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/0cd05d781f8143bd942d1dfd4c7578d9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/0cd05d781f8143bd942d1dfd4c7578d9 2024-11-21T00:29:02,829 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9c35dc3deb814687948c492e7e71b072 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/9c35dc3deb814687948c492e7e71b072 2024-11-21T00:29:02,833 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c74f5325ba35410dbaae4853b02b624f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/a45c425e22044bb9a38b735f4c9bc03c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c33655d1173942c8be35781269bb3f6c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/991132d3fc70457b95583a49f96144de, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b3659b66cc404d9880fb09df77abb5cf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bbb97bee23f54f28b7fd46e811cdbc79, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e2f486e763ee4d3bb246a9abe749c478, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/5f32c95385dd41ea905ccc5922cb22d0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ece27a92523e4a18b4028c073d9035ce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/52e68e84b62642609b7423207ae551e6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/dacc4ae1f8a543c9a36ffd643d0d3e2e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/f450013fbd2242fdb4a5d4ddc59c64ac, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/499d7dca2bb245d5988eccf67f83d752, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ac1a4d0f0f2e4a8b83bad092b60396f0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/87541d9273ac4fbc8b91b7a3e46af2f2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b8ada9b7d7894656b9c1e43624641564, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/4da9e9261e754cd3a3d46cfc9d044685, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/0dc7f5473a5d42c0b838412cf9e0bc5c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/261d7998859b469096d02abfcceef51d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/cac36877aa374648b6cbb5bd78ec9c34, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bc5f849550dd4b9d81ff98c1c29afd0a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/1da22f74bf444a36bafb8fc48d35bcae, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e032bea22f3d4718a3b430264dc867b4, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ba655ac015e740c38c18562c5d6dcf35, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/2d093004ab6b4be8a797c9e99281cfd4] to archive 2024-11-21T00:29:02,834 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
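
[editor's note] The HFileArchiver entries in this section show that compacted store files are not deleted outright: each file under data/<namespace>/<table>/<region>/<family>/ is renamed into the same relative layout under the cluster's archive/ directory. The helper below is an illustrative sketch of that move pattern using only the Hadoop FileSystem API; it is not HBase's HFileArchiver implementation, and the method name is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveMoveSketch {
      // Moves <root>/data/<ns>/<table>/<region>/<family>/<file>
      // to    <root>/archive/data/<ns>/<table>/<region>/<family>/<file>,
      // mirroring the source layout exactly as in the log entries above.
      static void moveToArchive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        String root = rootDir.toUri().getPath();
        String src = storeFile.toUri().getPath();
        // Keep the path relative to the cluster root so the archive mirrors it.
        String relative = src.substring(root.length()).replaceFirst("^/", "");
        Path target = new Path(new Path(rootDir, "archive"), relative);
        fs.mkdirs(target.getParent());          // ensure archive/<...>/<family> exists
        if (!fs.rename(storeFile, target)) {    // metadata-only move within the same HDFS
          throw new IOException("Could not archive " + storeFile + " to " + target);
        }
      }
    }

Called, for example, with rootDir set to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f and one of the B-family store files listed above, this would produce the same archive/data/default/TestAcidGuarantees/... destination paths that HFileArchiver logs below.
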
2024-11-21T00:29:02,836 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c74f5325ba35410dbaae4853b02b624f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c74f5325ba35410dbaae4853b02b624f 2024-11-21T00:29:02,839 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/a45c425e22044bb9a38b735f4c9bc03c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/a45c425e22044bb9a38b735f4c9bc03c 2024-11-21T00:29:02,840 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c33655d1173942c8be35781269bb3f6c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/c33655d1173942c8be35781269bb3f6c 2024-11-21T00:29:02,841 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/991132d3fc70457b95583a49f96144de to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/991132d3fc70457b95583a49f96144de 2024-11-21T00:29:02,842 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b3659b66cc404d9880fb09df77abb5cf to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b3659b66cc404d9880fb09df77abb5cf 2024-11-21T00:29:02,843 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bbb97bee23f54f28b7fd46e811cdbc79 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bbb97bee23f54f28b7fd46e811cdbc79 2024-11-21T00:29:02,844 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e2f486e763ee4d3bb246a9abe749c478 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e2f486e763ee4d3bb246a9abe749c478 2024-11-21T00:29:02,845 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/5f32c95385dd41ea905ccc5922cb22d0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/5f32c95385dd41ea905ccc5922cb22d0 2024-11-21T00:29:02,846 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ece27a92523e4a18b4028c073d9035ce to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ece27a92523e4a18b4028c073d9035ce 2024-11-21T00:29:02,848 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/52e68e84b62642609b7423207ae551e6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/52e68e84b62642609b7423207ae551e6 2024-11-21T00:29:02,849 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/dacc4ae1f8a543c9a36ffd643d0d3e2e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/dacc4ae1f8a543c9a36ffd643d0d3e2e 2024-11-21T00:29:02,854 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/f450013fbd2242fdb4a5d4ddc59c64ac to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/f450013fbd2242fdb4a5d4ddc59c64ac 2024-11-21T00:29:02,856 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/499d7dca2bb245d5988eccf67f83d752 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/499d7dca2bb245d5988eccf67f83d752 2024-11-21T00:29:02,859 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ac1a4d0f0f2e4a8b83bad092b60396f0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ac1a4d0f0f2e4a8b83bad092b60396f0 2024-11-21T00:29:02,860 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/87541d9273ac4fbc8b91b7a3e46af2f2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/87541d9273ac4fbc8b91b7a3e46af2f2 2024-11-21T00:29:02,863 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b8ada9b7d7894656b9c1e43624641564 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/b8ada9b7d7894656b9c1e43624641564 2024-11-21T00:29:02,869 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/4da9e9261e754cd3a3d46cfc9d044685 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/4da9e9261e754cd3a3d46cfc9d044685 2024-11-21T00:29:02,869 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/1a8874e3ef31481c9b9328d0d59d9a93 2024-11-21T00:29:02,879 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/0dc7f5473a5d42c0b838412cf9e0bc5c to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/0dc7f5473a5d42c0b838412cf9e0bc5c 2024-11-21T00:29:02,882 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/A/a9002caf320b4953a072869105f7f62c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a9002caf320b4953a072869105f7f62c 2024-11-21T00:29:02,885 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/261d7998859b469096d02abfcceef51d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/261d7998859b469096d02abfcceef51d 2024-11-21T00:29:02,888 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a9002caf320b4953a072869105f7f62c, entries=150, sequenceid=388, filesize=30.5 K 2024-11-21T00:29:02,890 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/B/f3e801955f19476c8dec7e1bb31298af as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/f3e801955f19476c8dec7e1bb31298af 2024-11-21T00:29:02,892 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/cac36877aa374648b6cbb5bd78ec9c34 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/cac36877aa374648b6cbb5bd78ec9c34 2024-11-21T00:29:02,893 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/f3e801955f19476c8dec7e1bb31298af, entries=150, sequenceid=388, filesize=12.0 K 2024-11-21T00:29:02,895 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/.tmp/C/1a8874e3ef31481c9b9328d0d59d9a93 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/1a8874e3ef31481c9b9328d0d59d9a93 2024-11-21T00:29:02,895 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bc5f849550dd4b9d81ff98c1c29afd0a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/bc5f849550dd4b9d81ff98c1c29afd0a 2024-11-21T00:29:02,899 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/1da22f74bf444a36bafb8fc48d35bcae to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/1da22f74bf444a36bafb8fc48d35bcae 2024-11-21T00:29:02,900 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/1a8874e3ef31481c9b9328d0d59d9a93, entries=150, sequenceid=388, filesize=12.0 K 2024-11-21T00:29:02,900 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e032bea22f3d4718a3b430264dc867b4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/e032bea22f3d4718a3b430264dc867b4 2024-11-21T00:29:02,903 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 306fd645e20cdcec516bf24d0ab4894b in 1700ms, sequenceid=388, compaction requested=true 2024-11-21T00:29:02,904 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ba655ac015e740c38c18562c5d6dcf35 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ba655ac015e740c38c18562c5d6dcf35 2024-11-21T00:29:02,906 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived 
from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/2d093004ab6b4be8a797c9e99281cfd4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/2d093004ab6b4be8a797c9e99281cfd4 2024-11-21T00:29:02,910 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/56ed5e654f6e4afc879d89f3395a657a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/e42062edb6584afb8c587ca09544bc6c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/46747fcd2de545edb4b3d7380d967073, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/b8ef0721886a41b886d4cbbf57bed49a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f1a25adfa4744068bc7099ca5eda5bd1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/245d3666bad64805a4befd88bf30eb32, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cac6213a975142419cfe3d93dbb61398, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b9deb97a0d943dba383c5c6b437e10d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/be1717b181fb4089802023b66f052810, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/0096c19f074b47998ef54fd64f7d7c02, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4768982e5f864ae789c7e276895ae5e4, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/35410e5fdcdd48f5804ee9d9e15d608d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/9af105fc458349cbb81dffe60813d82d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/d5a64934ac474fbab131338b10e6bea9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4de57c16950149d5ae18b7071aeea71b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/639cf602012449589aca946052083216, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/3026674cf59a4f929fd733896c8ff4aa, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f8834170bd4c4a39802bbcf711229055, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/c5aa9a0539ff4dc8a70501df56b3e253, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b22238dd5c349c0a0a7b33909d9cb49, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/1fcd3b1b4c5d433abd9b2f44a0130f56, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cb8a80bc1c0846da8b559b5fe46b6b5f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/479b09aea4a9432581ad26d0340c9202, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/5aaa6a5adee54e548cec5cbd00824a1b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/47da6b65650b4ca794bfb8ab0634247b] to archive 2024-11-21T00:29:02,911 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
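The archiving entries above each move a compacted store file from the region's data directory to the parallel location under archive/, preserving the namespace/table/region/family layout. The following is a minimal illustrative sketch of that path mapping; it is not HBase's HFileArchiver implementation, and the class and method names are assumptions made for this example.

import org.apache.hadoop.fs.Path;

// Sketch only: compute where a compacted store file would land under the
// archive directory, mirroring the data -> archive/data moves logged above.
public final class ArchivePathSketch {
  private ArchivePathSketch() {}

  // rootDir:   e.g. hdfs://localhost:38105/user/jenkins/test-data/<run-id>
  // storeFile: a file under <rootDir>/data/<namespace>/<table>/<region>/<family>/<hfile>
  // returns:   the matching path under <rootDir>/archive/data/...
  public static Path toArchivePath(Path rootDir, Path storeFile) {
    String dataPrefix = rootDir.toString() + "/data/";
    String file = storeFile.toString();
    if (!file.startsWith(dataPrefix)) {
      throw new IllegalArgumentException("store file is not under " + dataPrefix);
    }
    return new Path(rootDir, "archive/data/" + file.substring(dataPrefix.length()));
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f");
    Path store = new Path(root, "data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/56ed5e654f6e4afc879d89f3395a657a");
    // Prints the .../archive/data/default/TestAcidGuarantees/.../C/... form seen in the log entries above.
    System.out.println(toArchivePath(root, store));
  }
}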
2024-11-21T00:29:02,912 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/56ed5e654f6e4afc879d89f3395a657a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/56ed5e654f6e4afc879d89f3395a657a 2024-11-21T00:29:02,913 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/e42062edb6584afb8c587ca09544bc6c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/e42062edb6584afb8c587ca09544bc6c 2024-11-21T00:29:02,914 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/46747fcd2de545edb4b3d7380d967073 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/46747fcd2de545edb4b3d7380d967073 2024-11-21T00:29:02,915 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/b8ef0721886a41b886d4cbbf57bed49a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/b8ef0721886a41b886d4cbbf57bed49a 2024-11-21T00:29:02,920 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f1a25adfa4744068bc7099ca5eda5bd1 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f1a25adfa4744068bc7099ca5eda5bd1 2024-11-21T00:29:02,921 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/245d3666bad64805a4befd88bf30eb32 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/245d3666bad64805a4befd88bf30eb32 2024-11-21T00:29:02,928 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cac6213a975142419cfe3d93dbb61398 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cac6213a975142419cfe3d93dbb61398 2024-11-21T00:29:02,941 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b9deb97a0d943dba383c5c6b437e10d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b9deb97a0d943dba383c5c6b437e10d 2024-11-21T00:29:02,955 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/be1717b181fb4089802023b66f052810 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/be1717b181fb4089802023b66f052810 2024-11-21T00:29:02,956 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/0096c19f074b47998ef54fd64f7d7c02 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/0096c19f074b47998ef54fd64f7d7c02 2024-11-21T00:29:02,963 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4768982e5f864ae789c7e276895ae5e4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4768982e5f864ae789c7e276895ae5e4 2024-11-21T00:29:02,969 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/35410e5fdcdd48f5804ee9d9e15d608d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/35410e5fdcdd48f5804ee9d9e15d608d 2024-11-21T00:29:02,971 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/9af105fc458349cbb81dffe60813d82d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/9af105fc458349cbb81dffe60813d82d 2024-11-21T00:29:02,972 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/d5a64934ac474fbab131338b10e6bea9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/d5a64934ac474fbab131338b10e6bea9 2024-11-21T00:29:02,975 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4de57c16950149d5ae18b7071aeea71b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/4de57c16950149d5ae18b7071aeea71b 2024-11-21T00:29:02,978 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/639cf602012449589aca946052083216 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/639cf602012449589aca946052083216 2024-11-21T00:29:02,980 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/3026674cf59a4f929fd733896c8ff4aa to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/3026674cf59a4f929fd733896c8ff4aa 2024-11-21T00:29:02,981 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f8834170bd4c4a39802bbcf711229055 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/f8834170bd4c4a39802bbcf711229055 2024-11-21T00:29:02,982 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/c5aa9a0539ff4dc8a70501df56b3e253 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/c5aa9a0539ff4dc8a70501df56b3e253 2024-11-21T00:29:02,989 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b22238dd5c349c0a0a7b33909d9cb49 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/6b22238dd5c349c0a0a7b33909d9cb49 2024-11-21T00:29:02,992 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/1fcd3b1b4c5d433abd9b2f44a0130f56 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/1fcd3b1b4c5d433abd9b2f44a0130f56 2024-11-21T00:29:02,994 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cb8a80bc1c0846da8b559b5fe46b6b5f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/cb8a80bc1c0846da8b559b5fe46b6b5f 2024-11-21T00:29:02,997 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/479b09aea4a9432581ad26d0340c9202 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/479b09aea4a9432581ad26d0340c9202 2024-11-21T00:29:02,999 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/5aaa6a5adee54e548cec5cbd00824a1b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/5aaa6a5adee54e548cec5cbd00824a1b 2024-11-21T00:29:03,000 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0e7930017ff8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/47da6b65650b4ca794bfb8ab0634247b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/47da6b65650b4ca794bfb8ab0634247b 2024-11-21T00:29:03,016 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/recovered.edits/391.seqid, newMaxSeqId=391, maxSeqId=4 2024-11-21T00:29:03,016 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b. 2024-11-21T00:29:03,017 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1635): Region close journal for 306fd645e20cdcec516bf24d0ab4894b: 2024-11-21T00:29:03,018 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(170): Closed 306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,018 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=306fd645e20cdcec516bf24d0ab4894b, regionState=CLOSED 2024-11-21T00:29:03,020 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-21T00:29:03,020 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseRegionProcedure 306fd645e20cdcec516bf24d0ab4894b, server=0e7930017ff8,37961,1732148819586 in 1.9680 sec 2024-11-21T00:29:03,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-11-21T00:29:03,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=306fd645e20cdcec516bf24d0ab4894b, UNASSIGN in 1.9720 sec 2024-11-21T00:29:03,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-21T00:29:03,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9760 sec 2024-11-21T00:29:03,023 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148943023"}]},"ts":"1732148943023"} 2024-11-21T00:29:03,024 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-21T00:29:03,071 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-21T00:29:03,072 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0550 sec 2024-11-21T00:29:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-21T00:29:03,122 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, 
Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-21T00:29:03,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-21T00:29:03,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:03,124 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:03,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-21T00:29:03,124 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=124, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:03,125 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,136 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/recovered.edits] 2024-11-21T00:29:03,155 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a9002caf320b4953a072869105f7f62c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/a9002caf320b4953a072869105f7f62c 2024-11-21T00:29:03,180 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb7dd2ae618c4e2497dbeff885db6d54 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/eb7dd2ae618c4e2497dbeff885db6d54 2024-11-21T00:29:03,182 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/ec58172a2c234fa0ae67ad2d68314f37 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/A/ec58172a2c234fa0ae67ad2d68314f37 2024-11-21T00:29:03,185 
DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ba1d5372619c4425adbac0509e145c41 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/ba1d5372619c4425adbac0509e145c41 2024-11-21T00:29:03,187 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/f3e801955f19476c8dec7e1bb31298af to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/f3e801955f19476c8dec7e1bb31298af 2024-11-21T00:29:03,189 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/fee1fb484edd43a9af71bd21447bc4e8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/B/fee1fb484edd43a9af71bd21447bc4e8 2024-11-21T00:29:03,192 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/1a8874e3ef31481c9b9328d0d59d9a93 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/1a8874e3ef31481c9b9328d0d59d9a93 2024-11-21T00:29:03,196 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/819e51a88a134792849c31c8c7260f04 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/819e51a88a134792849c31c8c7260f04 2024-11-21T00:29:03,198 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/97294a12ad074bf092e5b86dcf2e7cba to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/C/97294a12ad074bf092e5b86dcf2e7cba 2024-11-21T00:29:03,201 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/recovered.edits/391.seqid to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b/recovered.edits/391.seqid 2024-11-21T00:29:03,202 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,206 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-21T00:29:03,210 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-21T00:29:03,218 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-21T00:29:03,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-21T00:29:03,254 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121001d05261737462491f18c03eb94c184_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121001d05261737462491f18c03eb94c184_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,262 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411210671b9189bd148e3b77358ac399e8051_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411210671b9189bd148e3b77358ac399e8051_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,264 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411210b6d10622ff24f72a0b132080cbe39f0_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411210b6d10622ff24f72a0b132080cbe39f0_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,267 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411210c46aabf0fea4369bd444dfe6ad5255f_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411210c46aabf0fea4369bd444dfe6ad5255f_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,268 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411211c422ea1a6b14defae8cbdc39586356a_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411211c422ea1a6b14defae8cbdc39586356a_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,269 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112126dcb4271b6744aa9545e6e1da06acd1_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112126dcb4271b6744aa9545e6e1da06acd1_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,271 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121318050e28ea34975bc6550c60779d8f3_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121318050e28ea34975bc6550c60779d8f3_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,272 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121329c2594536e4fbaa14806d541270f7d_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121329c2594536e4fbaa14806d541270f7d_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,273 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112143668bbc5f6f4401b66a89f0af158fa0_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112143668bbc5f6f4401b66a89f0af158fa0_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,274 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121593d8759a1594d9687a8961907172911_306fd645e20cdcec516bf24d0ab4894b to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121593d8759a1594d9687a8961907172911_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,275 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411215b1eca282b5f49cbb0c8126b3ca29ada_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411215b1eca282b5f49cbb0c8126b3ca29ada_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,277 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112161e02f9f9f2044508dc430a85628a83e_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112161e02f9f9f2044508dc430a85628a83e_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,290 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112163114e5430f64491901450715853877a_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112163114e5430f64491901450715853877a_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,294 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112192c6d32eed964c09ae2079461f375daa_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112192c6d32eed964c09ae2079461f375daa_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,296 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ab49664161e0480d99c68ffd9085912c_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ab49664161e0480d99c68ffd9085912c_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,297 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ad5f78acbf7b4d909388927dde9ca6de_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ad5f78acbf7b4d909388927dde9ca6de_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,298 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b52279034ce34effb7495e6161c454bc_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b52279034ce34effb7495e6161c454bc_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,300 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ba7fe161081746ac81580527704a14eb_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ba7fe161081746ac81580527704a14eb_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,301 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ddc77e5576d04a3ba843ddfefcc08d8b_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121ddc77e5576d04a3ba843ddfefcc08d8b_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,304 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f4702171c4a6420797935b48816cdb66_306fd645e20cdcec516bf24d0ab4894b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f4702171c4a6420797935b48816cdb66_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,305 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f75d29fc534f4ed090087b7408841f64_306fd645e20cdcec516bf24d0ab4894b to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121f75d29fc534f4ed090087b7408841f64_306fd645e20cdcec516bf24d0ab4894b 2024-11-21T00:29:03,306 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-21T00:29:03,309 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=124, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:03,325 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-21T00:29:03,340 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-21T00:29:03,341 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=124, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:03,341 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-21T00:29:03,342 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732148943341"}]},"ts":"9223372036854775807"} 2024-11-21T00:29:03,347 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-21T00:29:03,347 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 306fd645e20cdcec516bf24d0ab4894b, NAME => 'TestAcidGuarantees,,1732148915869.306fd645e20cdcec516bf24d0ab4894b.', STARTKEY => '', ENDKEY => ''}] 2024-11-21T00:29:03,348 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-21T00:29:03,348 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732148943348"}]},"ts":"9223372036854775807"} 2024-11-21T00:29:03,360 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-21T00:29:03,376 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=124, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:03,376 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 254 msec 2024-11-21T00:29:03,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-21T00:29:03,447 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-21T00:29:03,467 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=241 (was 238) - Thread LEAK? -, OpenFileDescriptor=464 (was 453) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=799 (was 857), ProcessCount=11 (was 11), AvailableMemoryMB=3388 (was 2387) - AvailableMemoryMB LEAK? - 2024-11-21T00:29:03,495 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241, OpenFileDescriptor=464, MaxFileDescriptor=1048576, SystemLoadAverage=799, ProcessCount=11, AvailableMemoryMB=3387 2024-11-21T00:29:03,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-21T00:29:03,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:29:03,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:03,506 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:29:03,507 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:03,507 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 125 2024-11-21T00:29:03,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-21T00:29:03,510 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:29:03,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742289_1465 (size=963) 2024-11-21T00:29:03,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=125 2024-11-21T00:29:03,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-21T00:29:03,967 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f 2024-11-21T00:29:03,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742290_1466 (size=53) 2024-11-21T00:29:04,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-21T00:29:04,376 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:29:04,376 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 21e23f12556a7b945d55cc2f3dac60b0, disabling compactions & flushes 2024-11-21T00:29:04,376 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:04,376 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:04,376 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. after waiting 0 ms 2024-11-21T00:29:04,376 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:04,376 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
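The create request logged above (and echoed when the region is instantiated) describes a table with an ADAPTIVE compacting memstore and three column families A, B and C, each keeping a single version with 64 KB blocks and no compression. A client-side equivalent, sketched with the standard HBase 2.x Admin API (connection setup and error handling are assumptions, not taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // TABLE_ATTRIBUTES => METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)       // VERSIONS => '1'
                .setBlocksize(64 * 1024) // BLOCKSIZE => '65536'
                .build());
      }
      admin.createTable(table.build());
    }
  }
}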
2024-11-21T00:29:04,376 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 21e23f12556a7b945d55cc2f3dac60b0:
2024-11-21T00:29:04,377 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META
2024-11-21T00:29:04,377 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732148944377"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148944377"}]},"ts":"1732148944377"}
2024-11-21T00:29:04,380 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-11-21T00:29:04,382 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-21T00:29:04,384 ERROR [master/0e7930017ff8:0.Chore.1 {}] master.TableStateManager(95): Unable to get table TestAcidGuarantees state
org.apache.hadoop.hbase.TableNotFoundException: No state found for TestAcidGuarantees
	at org.apache.hadoop.hbase.master.TableStateManager.getTableState(TableStateManager.java:155) ~[classes/:?]
	at org.apache.hadoop.hbase.master.TableStateManager.isTableState(TableStateManager.java:92) ~[classes/:?]
	at org.apache.hadoop.hbase.master.assignment.AssignmentManager.isTableDisabled(AssignmentManager.java:540) ~[classes/:?]
	at org.apache.hadoop.hbase.master.assignment.AssignmentManager.getRegionStatesCount(AssignmentManager.java:2609) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.getClusterMetricsWithoutCoprocessor(HMaster.java:3012) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.getClusterMetricsWithoutCoprocessor(HMaster.java:2906) ~[classes/:?]
	at org.apache.hadoop.hbase.master.balancer.ClusterStatusChore.chore(ClusterStatusChore.java:47) ~[classes/:?]
	at org.apache.hadoop.hbase.ScheduledChore.run(ScheduledChore.java:161) ~[hbase-common-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
	at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:305) ~[?:?]
	at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:305) ~[?:?]
	at org.apache.hadoop.hbase.JitterScheduledThreadPoolExecutorImpl$JitteredRunnableScheduledFuture.run(JitterScheduledThreadPoolExecutorImpl.java:107) ~[hbase-common-2.7.0-SNAPSHOT.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:04,387 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148944387"}]},"ts":"1732148944387"} 2024-11-21T00:29:04,392 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-21T00:29:04,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=21e23f12556a7b945d55cc2f3dac60b0, ASSIGN}] 2024-11-21T00:29:04,447 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=21e23f12556a7b945d55cc2f3dac60b0, ASSIGN 2024-11-21T00:29:04,448 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=21e23f12556a7b945d55cc2f3dac60b0, ASSIGN; state=OFFLINE, location=0e7930017ff8,37961,1732148819586; forceNewPlan=false, retain=false 2024-11-21T00:29:04,598 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=21e23f12556a7b945d55cc2f3dac60b0, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:04,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; OpenRegionProcedure 21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:29:04,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-21T00:29:04,751 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:04,754 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
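Once the OpenRegionProcedure dispatched above completes, the region's location (here 0e7930017ff8,37961) is recorded in hbase:meta and can be resolved from the client side. A minimal sketch, assuming the standard RegionLocator API (imports as in the earlier sketch, plus HRegionLocation and RegionLocator):

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // prints the encoded region name and the hosting region server
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }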
2024-11-21T00:29:04,754 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7285): Opening region: {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:29:04,754 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:04,754 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:29:04,755 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7327): checking encryption for 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:04,755 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7330): checking classloading for 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:04,767 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:04,771 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:29:04,771 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 21e23f12556a7b945d55cc2f3dac60b0 columnFamilyName A 2024-11-21T00:29:04,772 DEBUG [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:04,774 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] regionserver.HStore(327): Store=21e23f12556a7b945d55cc2f3dac60b0/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:04,774 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:04,776 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:29:04,776 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 21e23f12556a7b945d55cc2f3dac60b0 columnFamilyName B 2024-11-21T00:29:04,777 DEBUG [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:04,777 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] regionserver.HStore(327): Store=21e23f12556a7b945d55cc2f3dac60b0/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:04,777 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:04,779 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:29:04,779 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 21e23f12556a7b945d55cc2f3dac60b0 columnFamilyName C 2024-11-21T00:29:04,779 DEBUG [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:04,780 INFO [StoreOpener-21e23f12556a7b945d55cc2f3dac60b0-1 {}] regionserver.HStore(327): Store=21e23f12556a7b945d55cc2f3dac60b0/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:04,780 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:04,781 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:04,781 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:04,783 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T00:29:04,793 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1085): writing seq id for 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:04,800 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:29:04,804 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1102): Opened 21e23f12556a7b945d55cc2f3dac60b0; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69390694, jitterRate=0.0340019166469574}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:29:04,804 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1001): Region open journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:04,807 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., pid=127, masterSystemTime=1732148944750 2024-11-21T00:29:04,808 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:04,808 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
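The CompactingMemStore lines above (compactor=ADAPTIVE, 2.00 MB in-memory flush threshold) appear to follow from the table-level 'hbase.hregion.compacting.memstore.type' attribute in the descriptor. The same policy can also be declared per column family; a minimal sketch, assuming the HBase 2.x builder API (imports as in the earlier sketch, plus ColumnFamilyDescriptor and MemoryCompactionPolicy):

    // per-family equivalent of the table attribute seen in the descriptor above
    ColumnFamilyDescriptor cfA =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)  // => CompactingMemStore with ADAPTIVE compactor
            .setMaxVersions(1)
            .build();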
2024-11-21T00:29:04,811 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=21e23f12556a7b945d55cc2f3dac60b0, regionState=OPEN, openSeqNum=2, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:04,815 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-21T00:29:04,815 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; OpenRegionProcedure 21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 in 213 msec 2024-11-21T00:29:04,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-11-21T00:29:04,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=21e23f12556a7b945d55cc2f3dac60b0, ASSIGN in 369 msec 2024-11-21T00:29:04,817 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:29:04,817 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148944817"}]},"ts":"1732148944817"} 2024-11-21T00:29:04,818 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-21T00:29:04,851 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:29:04,852 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3540 sec 2024-11-21T00:29:05,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-21T00:29:05,619 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 125 completed 2024-11-21T00:29:05,620 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x345fa4f7 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38dd8644 2024-11-21T00:29:05,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@466b85c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,668 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:05,670 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59494, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:05,671 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:05,672 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56592, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:29:05,673 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x315a23ef to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65e17c26 2024-11-21T00:29:05,722 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f3ee89e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,723 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d125972 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53fc02ba 2024-11-21T00:29:05,747 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b0e6a43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,748 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x134bfe32 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2011d733 2024-11-21T00:29:05,773 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8e5fd00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,774 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17b55f2f to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39b3baa5 2024-11-21T00:29:05,797 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e195d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,798 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x646ca555 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@402e5def 2024-11-21T00:29:05,822 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14088aa9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,823 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10bda459 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40302925 2024-11-21T00:29:05,855 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b8d64d3, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,856 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0657e1bf to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47ef9951 2024-11-21T00:29:05,891 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@784d683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,892 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dee2855 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@567011a8 2024-11-21T00:29:05,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7761f52b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,916 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x54e8a98a to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2430fee 2024-11-21T00:29:05,959 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a736a20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,960 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x677030bd to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d567fc2 2024-11-21T00:29:05,983 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c153822, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:06,019 DEBUG [hconnection-0x10c84c72-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:06,020 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59496, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:06,071 DEBUG [hconnection-0x752a7e4e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:06,074 DEBUG [hconnection-0x7a373f3d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:06,079 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59498, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:06,079 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59510, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:06,079 DEBUG [hconnection-0x181b269b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:06,081 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59518, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:06,091 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-21T00:29:06,092 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:06,093 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:06,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:06,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-21T00:29:06,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:06,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:29:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:06,120 DEBUG [hconnection-0x1b9962ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:06,123 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:06,127 DEBUG 
[hconnection-0x59b0813c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:06,128 DEBUG [hconnection-0x156a5360-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:06,129 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:06,129 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59544, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:06,169 DEBUG [hconnection-0x6704f4cf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:06,172 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:06,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149006169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149006174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149006178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149006178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149006180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-21T00:29:06,203 DEBUG [hconnection-0x490901f3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:06,204 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59584, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:06,219 DEBUG [hconnection-0x88c0c23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:06,220 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59594, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:06,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/d4d77d46e5714bf091f1536ec2c23fc7 is 50, key is test_row_0/A:col10/1732148946099/Put/seqid=0 2024-11-21T00:29:06,250 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-21T00:29:06,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:06,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:06,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
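The FlushTableProcedure above (pid=128) corresponds to a client-side flush request against the master; the region then logs "NOT flushing ... as already flushing" because the memstore-pressure flush started at 00:29:06,111 is still running. A minimal sketch of the triggering call, assuming the standard Admin API (imports as in the earlier sketch):

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // requests a flush of every region of the table; on the master this shows up
      // as a FlushTableProcedure like pid=128 above
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }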
2024-11-21T00:29:06,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149006278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149006279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149006280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149006281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149006283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742291_1467 (size=12001) 2024-11-21T00:29:06,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-21T00:29:06,403 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-21T00:29:06,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:06,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:06,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
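The repeated RegionTooBusyException ("Over memstore limit=512.0 K") means the writers are outrunning the flush; the test keeps the region's memstore limit artificially small, so puts are rejected until the flush completes. The HBase client normally retries these internally, but an explicit backoff around a put would look roughly like the sketch below. How the exception surfaces (directly or wrapped) depends on client retry settings, so treat this as an assumption rather than the test's actual handling (imports as in the earlier sketches, plus Put, Table, and RegionTooBusyException):

    static void putWithBackoff(Connection conn) throws Exception {
      Put put = new Put(Bytes.toBytes("test_row_0"));                    // row key seen in the log
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;                                    // write accepted
          } catch (RegionTooBusyException e) {         // "Over memstore limit" from the server
            if (attempt >= 5) throw e;                 // give up after a few tries
            Thread.sleep(100L * attempt);              // simple linear backoff
          }
        }
      }
    }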
2024-11-21T00:29:06,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149006483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149006483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149006483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149006484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149006484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,556 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-21T00:29:06,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:06,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:06,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:06,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-21T00:29:06,715 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-21T00:29:06,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:06,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:06,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:06,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/d4d77d46e5714bf091f1536ec2c23fc7 2024-11-21T00:29:06,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149006787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149006787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/3fd9f3aa8598476ab9886c042f340a64 is 50, key is test_row_0/B:col10/1732148946099/Put/seqid=0 2024-11-21T00:29:06,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149006787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149006799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:06,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149006799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,870 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:06,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-21T00:29:06,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742292_1468 (size=12001) 2024-11-21T00:29:06,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:06,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:06,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:06,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:06,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:06,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/3fd9f3aa8598476ab9886c042f340a64 2024-11-21T00:29:06,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/e9e59320101341debaedb1e5d433d419 is 50, key is test_row_0/C:col10/1732148946099/Put/seqid=0 2024-11-21T00:29:07,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742293_1469 (size=12001) 2024-11-21T00:29:07,021 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/e9e59320101341debaedb1e5d433d419 2024-11-21T00:29:07,027 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-21T00:29:07,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:07,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:07,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:07,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:07,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:07,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:07,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/d4d77d46e5714bf091f1536ec2c23fc7 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d4d77d46e5714bf091f1536ec2c23fc7 2024-11-21T00:29:07,050 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d4d77d46e5714bf091f1536ec2c23fc7, entries=150, sequenceid=13, filesize=11.7 K 2024-11-21T00:29:07,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/3fd9f3aa8598476ab9886c042f340a64 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3fd9f3aa8598476ab9886c042f340a64 2024-11-21T00:29:07,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3fd9f3aa8598476ab9886c042f340a64, entries=150, sequenceid=13, filesize=11.7 K 2024-11-21T00:29:07,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/e9e59320101341debaedb1e5d433d419 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/e9e59320101341debaedb1e5d433d419 2024-11-21T00:29:07,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/e9e59320101341debaedb1e5d433d419, entries=150, sequenceid=13, filesize=11.7 K 2024-11-21T00:29:07,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 21e23f12556a7b945d55cc2f3dac60b0 in 1002ms, sequenceid=13, compaction requested=false 2024-11-21T00:29:07,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:07,183 DEBUG 
[RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-21T00:29:07,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:07,184 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:29:07,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:07,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:07,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:07,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:07,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:07,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:07,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-21T00:29:07,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/a3938f1e96fd418f8cb54166a173da65 is 50, key is test_row_0/A:col10/1732148946166/Put/seqid=0 2024-11-21T00:29:07,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742294_1470 (size=12001) 2024-11-21T00:29:07,259 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/a3938f1e96fd418f8cb54166a173da65 2024-11-21T00:29:07,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/19ad8ee9ad274a0da4a5820cb105bf42 is 50, key is test_row_0/B:col10/1732148946166/Put/seqid=0 2024-11-21T00:29:07,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742295_1471 (size=12001) 2024-11-21T00:29:07,304 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/19ad8ee9ad274a0da4a5820cb105bf42 2024-11-21T00:29:07,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:07,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:07,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/23f5e6db1cf844ad870a6a1029a95cad is 50, key is test_row_0/C:col10/1732148946166/Put/seqid=0 2024-11-21T00:29:07,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149007322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149007324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149007334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149007342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149007343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742296_1472 (size=12001) 2024-11-21T00:29:07,373 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/23f5e6db1cf844ad870a6a1029a95cad 2024-11-21T00:29:07,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/a3938f1e96fd418f8cb54166a173da65 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/a3938f1e96fd418f8cb54166a173da65 2024-11-21T00:29:07,420 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/a3938f1e96fd418f8cb54166a173da65, entries=150, sequenceid=38, filesize=11.7 K 2024-11-21T00:29:07,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/19ad8ee9ad274a0da4a5820cb105bf42 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/19ad8ee9ad274a0da4a5820cb105bf42 2024-11-21T00:29:07,443 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/19ad8ee9ad274a0da4a5820cb105bf42, entries=150, sequenceid=38, filesize=11.7 K 2024-11-21T00:29:07,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/23f5e6db1cf844ad870a6a1029a95cad as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/23f5e6db1cf844ad870a6a1029a95cad 2024-11-21T00:29:07,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149007444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149007444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149007448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149007453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149007455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,462 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/23f5e6db1cf844ad870a6a1029a95cad, entries=150, sequenceid=38, filesize=11.7 K 2024-11-21T00:29:07,464 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 21e23f12556a7b945d55cc2f3dac60b0 in 280ms, sequenceid=38, compaction requested=false 2024-11-21T00:29:07,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:07,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:07,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-21T00:29:07,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-21T00:29:07,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-21T00:29:07,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3780 sec 2024-11-21T00:29:07,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.3820 sec 2024-11-21T00:29:07,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:29:07,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:07,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:07,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:07,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:07,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:07,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:07,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:07,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/329cc9126f0346c38e762d1a1b1bcf4b is 50, key is test_row_0/A:col10/1732148947655/Put/seqid=0 2024-11-21T00:29:07,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742297_1473 (size=12001) 2024-11-21T00:29:07,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149007732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149007747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149007748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149007755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149007755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149007870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149007870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149007872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149007875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:07,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:07,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149007875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:08,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149008078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:08,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149008078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:08,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149008087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:08,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149008090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:08,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149008090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/329cc9126f0346c38e762d1a1b1bcf4b 2024-11-21T00:29:08,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/5d9851ce9f5a48dfa16b8106d3fec314 is 50, key is test_row_0/B:col10/1732148947655/Put/seqid=0 2024-11-21T00:29:08,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-21T00:29:08,213 INFO [Thread-2083 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-21T00:29:08,214 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-21T00:29:08,215 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:08,216 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): 
pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:08,217 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-21T00:29:08,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742298_1474 (size=12001) 2024-11-21T00:29:08,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/5d9851ce9f5a48dfa16b8106d3fec314 2024-11-21T00:29:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-21T00:29:08,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/17e19f15138f4a76a6674d14b8c585c7 is 50, key is test_row_0/C:col10/1732148947655/Put/seqid=0 2024-11-21T00:29:08,369 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-21T00:29:08,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:08,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:08,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:08,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:08,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:08,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742299_1475 (size=12001) 2024-11-21T00:29:08,396 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/17e19f15138f4a76a6674d14b8c585c7 2024-11-21T00:29:08,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149008393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149008393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149008403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149008407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149008407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/329cc9126f0346c38e762d1a1b1bcf4b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/329cc9126f0346c38e762d1a1b1bcf4b 2024-11-21T00:29:08,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/329cc9126f0346c38e762d1a1b1bcf4b, entries=150, sequenceid=50, filesize=11.7 K 2024-11-21T00:29:08,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/5d9851ce9f5a48dfa16b8106d3fec314 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5d9851ce9f5a48dfa16b8106d3fec314 2024-11-21T00:29:08,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5d9851ce9f5a48dfa16b8106d3fec314, entries=150, sequenceid=50, filesize=11.7 K 2024-11-21T00:29:08,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/17e19f15138f4a76a6674d14b8c585c7 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/17e19f15138f4a76a6674d14b8c585c7 2024-11-21T00:29:08,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/17e19f15138f4a76a6674d14b8c585c7, entries=150, sequenceid=50, filesize=11.7 K 2024-11-21T00:29:08,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-21T00:29:08,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 21e23f12556a7b945d55cc2f3dac60b0 in 865ms, sequenceid=50, compaction requested=true 2024-11-21T00:29:08,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:08,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:08,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:08,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:08,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:29:08,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:08,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-21T00:29:08,522 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:08,522 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:08,530 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:08,530 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/C is initiating minor compaction (all files) 2024-11-21T00:29:08,530 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 
21e23f12556a7b945d55cc2f3dac60b0/C in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:08,530 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/e9e59320101341debaedb1e5d433d419, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/23f5e6db1cf844ad870a6a1029a95cad, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/17e19f15138f4a76a6674d14b8c585c7] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=35.2 K 2024-11-21T00:29:08,531 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:08,531 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:08,531 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:08,531 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d4d77d46e5714bf091f1536ec2c23fc7, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/a3938f1e96fd418f8cb54166a173da65, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/329cc9126f0346c38e762d1a1b1bcf4b] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=35.2 K 2024-11-21T00:29:08,531 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4d77d46e5714bf091f1536ec2c23fc7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732148946099 2024-11-21T00:29:08,531 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e9e59320101341debaedb1e5d433d419, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732148946099 2024-11-21T00:29:08,532 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3938f1e96fd418f8cb54166a173da65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732148946166 2024-11-21T00:29:08,533 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 329cc9126f0346c38e762d1a1b1bcf4b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, 
earliestPutTs=1732148947291 2024-11-21T00:29:08,534 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:08,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-21T00:29:08,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:08,535 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:29:08,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:08,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:08,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:08,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:08,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:08,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:08,536 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 23f5e6db1cf844ad870a6a1029a95cad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732148946166 2024-11-21T00:29:08,537 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 17e19f15138f4a76a6674d14b8c585c7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732148947291 2024-11-21T00:29:08,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/d26815f4c57b46a4aa44b3f79e96216a is 50, key is test_row_0/A:col10/1732148947675/Put/seqid=0 2024-11-21T00:29:08,559 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#397 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:08,560 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/2f0924278f7e4536add1c40be8a393fa is 50, key is test_row_0/A:col10/1732148947655/Put/seqid=0 2024-11-21T00:29:08,569 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#C#compaction#398 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:08,570 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/de1abaca0bc942b78b49ff399137e559 is 50, key is test_row_0/C:col10/1732148947655/Put/seqid=0 2024-11-21T00:29:08,579 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-21T00:29:08,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742300_1476 (size=12104) 2024-11-21T00:29:08,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742302_1478 (size=12104) 2024-11-21T00:29:08,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742301_1477 (size=12001) 2024-11-21T00:29:08,655 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/d26815f4c57b46a4aa44b3f79e96216a 2024-11-21T00:29:08,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/add482e2805f4221a4d85051181df494 is 50, key is test_row_0/B:col10/1732148947675/Put/seqid=0 2024-11-21T00:29:08,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742303_1479 (size=12001) 2024-11-21T00:29:08,756 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/add482e2805f4221a4d85051181df494 2024-11-21T00:29:08,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/fe4e92042b8d489188574bbce756633e is 50, key is test_row_0/C:col10/1732148947675/Put/seqid=0 2024-11-21T00:29:08,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-21T00:29:08,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742304_1480 (size=12001) 2024-11-21T00:29:08,848 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/fe4e92042b8d489188574bbce756633e 2024-11-21T00:29:08,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/d26815f4c57b46a4aa44b3f79e96216a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d26815f4c57b46a4aa44b3f79e96216a 2024-11-21T00:29:08,863 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d26815f4c57b46a4aa44b3f79e96216a, entries=150, sequenceid=75, filesize=11.7 K 2024-11-21T00:29:08,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/add482e2805f4221a4d85051181df494 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/add482e2805f4221a4d85051181df494 2024-11-21T00:29:08,870 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/add482e2805f4221a4d85051181df494, entries=150, sequenceid=75, filesize=11.7 K 2024-11-21T00:29:08,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/fe4e92042b8d489188574bbce756633e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe4e92042b8d489188574bbce756633e 2024-11-21T00:29:08,876 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe4e92042b8d489188574bbce756633e, entries=150, sequenceid=75, filesize=11.7 K 2024-11-21T00:29:08,879 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 21e23f12556a7b945d55cc2f3dac60b0 in 344ms, sequenceid=75, compaction requested=true 2024-11-21T00:29:08,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:08,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:08,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-21T00:29:08,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-21T00:29:08,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-21T00:29:08,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 673 msec 2024-11-21T00:29:08,893 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 678 msec 2024-11-21T00:29:08,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:29:08,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:08,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:08,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:08,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:08,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:08,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:08,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:08,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/86576a87409b41adb5dbbf0251dab9e3 is 50, key is 
test_row_0/A:col10/1732148948954/Put/seqid=0 2024-11-21T00:29:09,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742305_1481 (size=19021) 2024-11-21T00:29:09,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/86576a87409b41adb5dbbf0251dab9e3 2024-11-21T00:29:09,019 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/2f0924278f7e4536add1c40be8a393fa as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2f0924278f7e4536add1c40be8a393fa 2024-11-21T00:29:09,028 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/de1abaca0bc942b78b49ff399137e559 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/de1abaca0bc942b78b49ff399137e559 2024-11-21T00:29:09,032 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into 2f0924278f7e4536add1c40be8a393fa(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:09,032 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:09,032 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=13, startTime=1732148948522; duration=0sec 2024-11-21T00:29:09,032 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:09,032 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:09,032 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:09,042 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:09,043 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:09,043 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:09,043 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3fd9f3aa8598476ab9886c042f340a64, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/19ad8ee9ad274a0da4a5820cb105bf42, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5d9851ce9f5a48dfa16b8106d3fec314, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/add482e2805f4221a4d85051181df494] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=46.9 K 2024-11-21T00:29:09,044 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fd9f3aa8598476ab9886c042f340a64, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732148946099 2024-11-21T00:29:09,044 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19ad8ee9ad274a0da4a5820cb105bf42, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732148946166 2024-11-21T00:29:09,044 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d9851ce9f5a48dfa16b8106d3fec314, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732148947291 2024-11-21T00:29:09,045 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting add482e2805f4221a4d85051181df494, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732148947675 2024-11-21T00:29:09,075 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/C of 21e23f12556a7b945d55cc2f3dac60b0 into de1abaca0bc942b78b49ff399137e559(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:09,075 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:09,075 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/C, priority=13, startTime=1732148948522; duration=0sec 2024-11-21T00:29:09,075 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:09,075 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:09,097 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#402 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:09,098 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/568b4a34b69749e1a211e0de434b5560 is 50, key is test_row_0/B:col10/1732148947675/Put/seqid=0 2024-11-21T00:29:09,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/9eba807bb4474b0983734ccacc765489 is 50, key is test_row_0/B:col10/1732148948954/Put/seqid=0 2024-11-21T00:29:09,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149009101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149009104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149009105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149009109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742306_1482 (size=12139) 2024-11-21T00:29:09,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149009121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,198 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/568b4a34b69749e1a211e0de434b5560 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/568b4a34b69749e1a211e0de434b5560 2024-11-21T00:29:09,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742307_1483 (size=12001) 2024-11-21T00:29:09,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/9eba807bb4474b0983734ccacc765489 2024-11-21T00:29:09,223 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into 568b4a34b69749e1a211e0de434b5560(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:09,223 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:09,223 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=12, startTime=1732148948522; duration=0sec 2024-11-21T00:29:09,223 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:09,223 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:09,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/b34e3648c3a742bc80eca51d8a2d847c is 50, key is test_row_0/C:col10/1732148948954/Put/seqid=0 2024-11-21T00:29:09,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149009251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149009253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149009253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149009253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149009267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742308_1484 (size=12001) 2024-11-21T00:29:09,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-21T00:29:09,322 INFO [Thread-2083 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-21T00:29:09,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:09,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-21T00:29:09,325 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:09,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-21T00:29:09,325 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:09,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:09,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-21T00:29:09,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149009461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149009463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149009464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,476 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-21T00:29:09,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:09,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:09,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:09,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:09,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:09,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:09,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149009472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149009483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-21T00:29:09,631 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-21T00:29:09,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:09,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:09,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:09,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:09,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:09,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/b34e3648c3a742bc80eca51d8a2d847c 2024-11-21T00:29:09,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/86576a87409b41adb5dbbf0251dab9e3 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/86576a87409b41adb5dbbf0251dab9e3 2024-11-21T00:29:09,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/86576a87409b41adb5dbbf0251dab9e3, entries=300, sequenceid=86, filesize=18.6 K 2024-11-21T00:29:09,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/9eba807bb4474b0983734ccacc765489 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/9eba807bb4474b0983734ccacc765489 2024-11-21T00:29:09,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/9eba807bb4474b0983734ccacc765489, entries=150, sequenceid=86, filesize=11.7 K 2024-11-21T00:29:09,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/b34e3648c3a742bc80eca51d8a2d847c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b34e3648c3a742bc80eca51d8a2d847c 2024-11-21T00:29:09,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b34e3648c3a742bc80eca51d8a2d847c, entries=150, sequenceid=86, filesize=11.7 K 2024-11-21T00:29:09,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 21e23f12556a7b945d55cc2f3dac60b0 in 773ms, sequenceid=86, compaction requested=true 2024-11-21T00:29:09,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:09,737 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:09,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:09,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:09,738 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-21T00:29:09,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:09,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:09,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:09,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:09,740 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:29:09,740 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:29:09,740 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. because compaction request was cancelled 2024-11-21T00:29:09,740 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:09,740 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:09,740 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43126 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:09,740 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:09,741 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:09,741 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2f0924278f7e4536add1c40be8a393fa, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d26815f4c57b46a4aa44b3f79e96216a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/86576a87409b41adb5dbbf0251dab9e3] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=42.1 K 2024-11-21T00:29:09,741 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:09,741 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/C is initiating minor compaction (all files) 2024-11-21T00:29:09,741 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/C in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:09,741 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/de1abaca0bc942b78b49ff399137e559, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe4e92042b8d489188574bbce756633e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b34e3648c3a742bc80eca51d8a2d847c] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=35.3 K 2024-11-21T00:29:09,742 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting de1abaca0bc942b78b49ff399137e559, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732148947291 2024-11-21T00:29:09,742 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting fe4e92042b8d489188574bbce756633e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732148947675 2024-11-21T00:29:09,742 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b34e3648c3a742bc80eca51d8a2d847c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732148948953 2024-11-21T00:29:09,742 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f0924278f7e4536add1c40be8a393fa, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732148947291 2024-11-21T00:29:09,743 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting d26815f4c57b46a4aa44b3f79e96216a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732148947675 2024-11-21T00:29:09,744 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86576a87409b41adb5dbbf0251dab9e3, keycount=300, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732148948935 2024-11-21T00:29:09,765 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#C#compaction#405 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:09,765 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/c02164366e0b4bb0b25dd31b59c40432 is 50, key is test_row_0/C:col10/1732148948954/Put/seqid=0 2024-11-21T00:29:09,771 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#406 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:09,772 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/c01cf1435c2d48e6bdece9fcdb7abe29 is 50, key is test_row_0/A:col10/1732148948954/Put/seqid=0 2024-11-21T00:29:09,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:09,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-21T00:29:09,785 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-21T00:29:09,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:09,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:09,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:09,786 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:09,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:09,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:09,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:09,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:09,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:09,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:09,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:09,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:09,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149009796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149009805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149009805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149009806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149009807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742310_1486 (size=12207) 2024-11-21T00:29:09,830 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/c01cf1435c2d48e6bdece9fcdb7abe29 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/c01cf1435c2d48e6bdece9fcdb7abe29 2024-11-21T00:29:09,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/aeeaeb10b0c14435a54431fc01b0af07 is 50, key is test_row_0/A:col10/1732148949778/Put/seqid=0 2024-11-21T00:29:09,839 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into c01cf1435c2d48e6bdece9fcdb7abe29(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:09,839 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:09,839 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=13, startTime=1732148949737; duration=0sec 2024-11-21T00:29:09,839 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:09,839 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:09,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742309_1485 (size=12207) 2024-11-21T00:29:09,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742311_1487 (size=16681) 2024-11-21T00:29:09,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/aeeaeb10b0c14435a54431fc01b0af07 2024-11-21T00:29:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-21T00:29:09,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149009919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/fece8372703f4446b0390a0276966d0f is 50, key is test_row_0/B:col10/1732148949778/Put/seqid=0 2024-11-21T00:29:09,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149009919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149009920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:09,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149009927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,946 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:09,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-21T00:29:09,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:09,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:09,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:09,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:09,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:09,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:09,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742312_1488 (size=12001) 2024-11-21T00:29:10,125 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-21T00:29:10,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:10,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:10,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:10,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:10,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:10,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:10,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:10,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149010134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:10,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149010147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:10,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149010147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:10,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149010151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,282 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/c02164366e0b4bb0b25dd31b59c40432 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/c02164366e0b4bb0b25dd31b59c40432 2024-11-21T00:29:10,284 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-21T00:29:10,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:10,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:10,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:10,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:10,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:10,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:10,317 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/C of 21e23f12556a7b945d55cc2f3dac60b0 into c02164366e0b4bb0b25dd31b59c40432(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:10,317 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:10,317 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/C, priority=13, startTime=1732148949738; duration=0sec 2024-11-21T00:29:10,317 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:10,317 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:10,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:10,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149010310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/fece8372703f4446b0390a0276966d0f 2024-11-21T00:29:10,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/69bc632db5374be8b92acc4ef9c73835 is 50, key is test_row_0/C:col10/1732148949778/Put/seqid=0 2024-11-21T00:29:10,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-21T00:29:10,438 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:10,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149010443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-21T00:29:10,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:10,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:10,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:10,451 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:10,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:10,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:10,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:10,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149010461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149010462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:10,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149010462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742313_1489 (size=12001) 2024-11-21T00:29:10,489 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/69bc632db5374be8b92acc4ef9c73835 2024-11-21T00:29:10,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/aeeaeb10b0c14435a54431fc01b0af07 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/aeeaeb10b0c14435a54431fc01b0af07 2024-11-21T00:29:10,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/aeeaeb10b0c14435a54431fc01b0af07, entries=250, sequenceid=117, filesize=16.3 K 2024-11-21T00:29:10,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/fece8372703f4446b0390a0276966d0f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/fece8372703f4446b0390a0276966d0f 2024-11-21T00:29:10,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/fece8372703f4446b0390a0276966d0f, entries=150, sequenceid=117, filesize=11.7 K 2024-11-21T00:29:10,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/69bc632db5374be8b92acc4ef9c73835 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/69bc632db5374be8b92acc4ef9c73835 2024-11-21T00:29:10,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/69bc632db5374be8b92acc4ef9c73835, entries=150, sequenceid=117, filesize=11.7 K 2024-11-21T00:29:10,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 21e23f12556a7b945d55cc2f3dac60b0 in 792ms, sequenceid=117, compaction requested=true 2024-11-21T00:29:10,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:10,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:10,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:10,570 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-21T00:29:10,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:10,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:10,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:10,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:29:10,570 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:10,571 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:29:10,571 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:29:10,571 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
because compaction request was cancelled 2024-11-21T00:29:10,571 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:10,571 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-21T00:29:10,572 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-21T00:29:10,572 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-21T00:29:10,572 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. because compaction request was cancelled 2024-11-21T00:29:10,572 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:10,573 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:10,573 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:10,574 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
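The "Need 3 to initiate" and "16 blocking" figures in the SortedCompactionPolicy lines above are consistent with HBase's default compaction thresholds. Purely as an illustrative sketch (values shown are the apparent defaults, not settings confirmed by this excerpt), these are the configuration keys that govern that behaviour:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Minimum number of eligible store files before a minor compaction is
        // initiated ("Need 3 to initiate" in the log above).
        conf.setInt("hbase.hstore.compaction.min", 3);

        // Upper bound on the number of files selected for one minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);

        // Store-file count at which writes to the region are blocked
        // ("16 blocking" in the compaction-selection lines above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        System.out.println("compaction.min     = " + conf.getInt("hbase.hstore.compaction.min", -1));
        System.out.println("blockingStoreFiles = " + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
    }
}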
2024-11-21T00:29:10,574 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/568b4a34b69749e1a211e0de434b5560, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/9eba807bb4474b0983734ccacc765489, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/fece8372703f4446b0390a0276966d0f] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=35.3 K 2024-11-21T00:29:10,574 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 568b4a34b69749e1a211e0de434b5560, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732148947675 2024-11-21T00:29:10,574 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9eba807bb4474b0983734ccacc765489, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732148948953 2024-11-21T00:29:10,574 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting fece8372703f4446b0390a0276966d0f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732148949104 2024-11-21T00:29:10,605 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:10,605 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#410 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:10,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-21T00:29:10,605 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/48106425f6ef410ea3dba6cb27d326ef is 50, key is test_row_0/B:col10/1732148949778/Put/seqid=0 2024-11-21T00:29:10,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:10,605 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-21T00:29:10,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:10,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:10,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:10,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:10,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:10,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:10,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/1313266f48894ae89e8388edad4cbbda is 50, key is test_row_0/A:col10/1732148949806/Put/seqid=0 2024-11-21T00:29:10,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742314_1490 (size=12241) 2024-11-21T00:29:10,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742315_1491 (size=12001) 2024-11-21T00:29:10,689 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/1313266f48894ae89e8388edad4cbbda 2024-11-21T00:29:10,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/ca8039c595bd4c0598131309f88787fd is 50, key is test_row_0/B:col10/1732148949806/Put/seqid=0 2024-11-21T00:29:10,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742316_1492 (size=12001) 2024-11-21T00:29:10,749 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=127 
(bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/ca8039c595bd4c0598131309f88787fd 2024-11-21T00:29:10,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/fe95cb9375f248669d62e2559d5c8dd0 is 50, key is test_row_0/C:col10/1732148949806/Put/seqid=0 2024-11-21T00:29:10,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742317_1493 (size=12001) 2024-11-21T00:29:10,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:10,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:11,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149011019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149011035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149011036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149011039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,054 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/48106425f6ef410ea3dba6cb27d326ef as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/48106425f6ef410ea3dba6cb27d326ef 2024-11-21T00:29:11,059 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into 48106425f6ef410ea3dba6cb27d326ef(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
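The repeated RegionTooBusyException warnings above are the region server pushing back on writers while the memstore is over its blocking limit; callers are expected to back off and retry. The sketch below only illustrates that retry-with-backoff idea: the helper name and backoff values are hypothetical, and whether the exception surfaces to application code directly or wrapped after the HBase client's own internal retries depends on client configuration.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    /** Retries a Put a few times when the region reports it is too busy. */
    static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        int attempts = 0;
        long backoffMs = 100;            // illustrative starting backoff
        while (true) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (++attempts >= 5) {
                    throw e;             // give up after a few attempts
                }
                Thread.sleep(backoffMs); // let flushes/compactions catch up
                backoffMs *= 2;
            }
        }
    }

    static Put examplePut() {
        // Row/column names mirror the test rows seen in the log (test_row_0, A:col10).
        return new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    }
}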
2024-11-21T00:29:11,060 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:11,060 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=13, startTime=1732148950570; duration=0sec 2024-11-21T00:29:11,060 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:11,060 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:11,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149011139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149011154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149011155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149011159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,212 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/fe95cb9375f248669d62e2559d5c8dd0 2024-11-21T00:29:11,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/1313266f48894ae89e8388edad4cbbda as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/1313266f48894ae89e8388edad4cbbda 2024-11-21T00:29:11,227 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/1313266f48894ae89e8388edad4cbbda, entries=150, sequenceid=127, filesize=11.7 K 2024-11-21T00:29:11,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/ca8039c595bd4c0598131309f88787fd as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/ca8039c595bd4c0598131309f88787fd 2024-11-21T00:29:11,235 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/ca8039c595bd4c0598131309f88787fd, entries=150, sequenceid=127, filesize=11.7 K 2024-11-21T00:29:11,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/fe95cb9375f248669d62e2559d5c8dd0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe95cb9375f248669d62e2559d5c8dd0 2024-11-21T00:29:11,247 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe95cb9375f248669d62e2559d5c8dd0, entries=150, sequenceid=127, filesize=11.7 K 2024-11-21T00:29:11,251 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 21e23f12556a7b945d55cc2f3dac60b0 in 646ms, sequenceid=127, compaction requested=true 2024-11-21T00:29:11,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:11,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
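The "Over memstore limit=512.0 K" in the warnings above is the per-region blocking threshold, conventionally the memstore flush size multiplied by a block multiplier; this test appears to run with a deliberately small flush size so that the limit is hit frequently. A minimal sketch of the two keys involved, with illustrative values only (the run's actual settings are not shown in this excerpt):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Memstore size at which a flush of the region is requested.
        // 128 KB here is an illustrative, test-sized value.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

        // Writes are rejected with RegionTooBusyException once the memstore
        // reaches flush.size * block.multiplier; 128 KB * 4 = 512 KB would match
        // the "Over memstore limit=512.0 K" seen in the log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit bytes = " + blockingLimit); // 524288
    }
}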
2024-11-21T00:29:11,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-21T00:29:11,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-21T00:29:11,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-21T00:29:11,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9280 sec 2024-11-21T00:29:11,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.9310 sec 2024-11-21T00:29:11,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-21T00:29:11,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:11,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:11,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:11,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:11,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:11,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:11,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/73ac0faa20724aeba1837171771d4ff4 is 50, key is test_row_0/A:col10/1732148951035/Put/seqid=0 2024-11-21T00:29:11,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149011379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149011381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149011381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149011388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149011388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742318_1494 (size=14541) 2024-11-21T00:29:11,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-21T00:29:11,436 INFO [Thread-2083 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-21T00:29:11,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:11,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-21T00:29:11,443 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:11,444 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:11,444 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=134 2024-11-21T00:29:11,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149011490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149011500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149011504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-21T00:29:11,598 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-21T00:29:11,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:11,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:11,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:11,599 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:11,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:11,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:11,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149011691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149011691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149011699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149011710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:11,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149011723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-21T00:29:11,762 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-21T00:29:11,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:11,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:11,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:11,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:11,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:11,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/73ac0faa20724aeba1837171771d4ff4 2024-11-21T00:29:11,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/d778c371c7264c94ab32ff47693815f2 is 50, key is test_row_0/B:col10/1732148951035/Put/seqid=0 2024-11-21T00:29:11,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742319_1495 (size=12151) 2024-11-21T00:29:11,929 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:11,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-21T00:29:11,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:11,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:11,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:11,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:11,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:11,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:12,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149012020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149012035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149012036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-21T00:29:12,091 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-21T00:29:12,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:12,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:12,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:12,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:12,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:12,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:12,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149012211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149012214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,253 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-21T00:29:12,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:12,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:12,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:12,254 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:12,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:12,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:12,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/d778c371c7264c94ab32ff47693815f2 2024-11-21T00:29:12,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/aad32fa6b8b54591957e203b1ef56daa is 50, key is test_row_0/C:col10/1732148951035/Put/seqid=0 2024-11-21T00:29:12,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742320_1496 (size=12151) 2024-11-21T00:29:12,406 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,406 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-21T00:29:12,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:12,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:12,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:12,407 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:12,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:12,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:12,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/aad32fa6b8b54591957e203b1ef56daa 2024-11-21T00:29:12,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/73ac0faa20724aeba1837171771d4ff4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/73ac0faa20724aeba1837171771d4ff4 2024-11-21T00:29:12,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/73ac0faa20724aeba1837171771d4ff4, entries=200, sequenceid=157, filesize=14.2 K 2024-11-21T00:29:12,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/d778c371c7264c94ab32ff47693815f2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/d778c371c7264c94ab32ff47693815f2 2024-11-21T00:29:12,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/d778c371c7264c94ab32ff47693815f2, entries=150, sequenceid=157, filesize=11.9 K 2024-11-21T00:29:12,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/aad32fa6b8b54591957e203b1ef56daa as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/aad32fa6b8b54591957e203b1ef56daa 2024-11-21T00:29:12,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/aad32fa6b8b54591957e203b1ef56daa, entries=150, sequenceid=157, filesize=11.9 K 2024-11-21T00:29:12,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 21e23f12556a7b945d55cc2f3dac60b0 in 1093ms, sequenceid=157, compaction requested=true 2024-11-21T00:29:12,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:12,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:12,436 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:12,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:12,436 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:12,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:12,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:12,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:12,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:12,440 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:12,440 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:12,440 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:12,441 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/48106425f6ef410ea3dba6cb27d326ef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/ca8039c595bd4c0598131309f88787fd, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/d778c371c7264c94ab32ff47693815f2] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=35.5 K 2024-11-21T00:29:12,441 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55430 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:12,441 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:12,441 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:12,441 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/c01cf1435c2d48e6bdece9fcdb7abe29, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/aeeaeb10b0c14435a54431fc01b0af07, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/1313266f48894ae89e8388edad4cbbda, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/73ac0faa20724aeba1837171771d4ff4] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=54.1 K 2024-11-21T00:29:12,443 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c01cf1435c2d48e6bdece9fcdb7abe29, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732148948953 2024-11-21T00:29:12,443 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 48106425f6ef410ea3dba6cb27d326ef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732148949104 2024-11-21T00:29:12,444 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ca8039c595bd4c0598131309f88787fd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732148949789 2024-11-21T00:29:12,444 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting aeeaeb10b0c14435a54431fc01b0af07, keycount=250, 
bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732148949104 2024-11-21T00:29:12,444 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1313266f48894ae89e8388edad4cbbda, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732148949789 2024-11-21T00:29:12,445 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting d778c371c7264c94ab32ff47693815f2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732148951016 2024-11-21T00:29:12,445 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73ac0faa20724aeba1837171771d4ff4, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732148951016 2024-11-21T00:29:12,489 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:12,490 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/4ff2b1fd15a34431a42e39a4f67ffb19 is 50, key is test_row_0/B:col10/1732148951035/Put/seqid=0 2024-11-21T00:29:12,506 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#418 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:12,506 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/34c6583b3f554379a7b9d17ce3577540 is 50, key is test_row_0/A:col10/1732148951035/Put/seqid=0 2024-11-21T00:29:12,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742321_1497 (size=12493) 2024-11-21T00:29:12,543 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/4ff2b1fd15a34431a42e39a4f67ffb19 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ff2b1fd15a34431a42e39a4f67ffb19 2024-11-21T00:29:12,550 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into 4ff2b1fd15a34431a42e39a4f67ffb19(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:12,550 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:12,550 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=13, startTime=1732148952436; duration=0sec 2024-11-21T00:29:12,550 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:12,550 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:12,551 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:12,552 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:12,552 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/C is initiating minor compaction (all files) 2024-11-21T00:29:12,552 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/C in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:12,552 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/c02164366e0b4bb0b25dd31b59c40432, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/69bc632db5374be8b92acc4ef9c73835, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe95cb9375f248669d62e2559d5c8dd0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/aad32fa6b8b54591957e203b1ef56daa] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=47.2 K 2024-11-21T00:29:12,552 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c02164366e0b4bb0b25dd31b59c40432, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732148948953 2024-11-21T00:29:12,552 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 69bc632db5374be8b92acc4ef9c73835, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732148949104 2024-11-21T00:29:12,553 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting fe95cb9375f248669d62e2559d5c8dd0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=127, earliestPutTs=1732148949789 2024-11-21T00:29:12,553 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting aad32fa6b8b54591957e203b1ef56daa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732148951016 2024-11-21T00:29:12,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-21T00:29:12,559 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-21T00:29:12,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:12,561 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-21T00:29:12,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:12,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:12,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:12,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:12,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:12,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:12,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
as already flushing 2024-11-21T00:29:12,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:12,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742322_1498 (size=12493) 2024-11-21T00:29:12,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/279e9dbaae0c4cba84762baca3505a32 is 50, key is test_row_0/A:col10/1732148951377/Put/seqid=0 2024-11-21T00:29:12,591 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#C#compaction#420 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:12,591 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/7d35f64532f6451ba229efd5dd9af5d7 is 50, key is test_row_0/C:col10/1732148951035/Put/seqid=0 2024-11-21T00:29:12,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742323_1499 (size=14541) 2024-11-21T00:29:12,634 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/279e9dbaae0c4cba84762baca3505a32 2024-11-21T00:29:12,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742324_1500 (size=12493) 2024-11-21T00:29:12,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/c85d50e1931b48afb7d5ebb7aa3a7de9 is 50, key is test_row_0/B:col10/1732148951377/Put/seqid=0 2024-11-21T00:29:12,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149012669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149012674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149012676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742325_1501 (size=12151) 2024-11-21T00:29:12,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149012777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149012782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149012784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:12,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:12,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149012980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,000 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/34c6583b3f554379a7b9d17ce3577540 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/34c6583b3f554379a7b9d17ce3577540 2024-11-21T00:29:13,010 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into 34c6583b3f554379a7b9d17ce3577540(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:13,010 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:13,010 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=12, startTime=1732148952436; duration=0sec 2024-11-21T00:29:13,010 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:13,010 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:13,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149013000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149012996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,074 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/7d35f64532f6451ba229efd5dd9af5d7 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/7d35f64532f6451ba229efd5dd9af5d7 2024-11-21T00:29:13,081 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/C of 21e23f12556a7b945d55cc2f3dac60b0 into 7d35f64532f6451ba229efd5dd9af5d7(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:13,081 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:13,081 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/C, priority=12, startTime=1732148952436; duration=0sec 2024-11-21T00:29:13,081 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:13,081 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:13,103 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/c85d50e1931b48afb7d5ebb7aa3a7de9 2024-11-21T00:29:13,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/01ee0bf5994b4da594726e968c3532ac is 50, key is test_row_0/C:col10/1732148951377/Put/seqid=0 2024-11-21T00:29:13,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742326_1502 (size=12151) 2024-11-21T00:29:13,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149013230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149013230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149013294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149013314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149013315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-21T00:29:13,576 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/01ee0bf5994b4da594726e968c3532ac 2024-11-21T00:29:13,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/279e9dbaae0c4cba84762baca3505a32 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/279e9dbaae0c4cba84762baca3505a32 2024-11-21T00:29:13,610 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/279e9dbaae0c4cba84762baca3505a32, entries=200, sequenceid=168, filesize=14.2 K 2024-11-21T00:29:13,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/c85d50e1931b48afb7d5ebb7aa3a7de9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/c85d50e1931b48afb7d5ebb7aa3a7de9 2024-11-21T00:29:13,618 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/c85d50e1931b48afb7d5ebb7aa3a7de9, entries=150, sequenceid=168, filesize=11.9 K 2024-11-21T00:29:13,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/01ee0bf5994b4da594726e968c3532ac as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/01ee0bf5994b4da594726e968c3532ac 2024-11-21T00:29:13,630 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/01ee0bf5994b4da594726e968c3532ac, entries=150, sequenceid=168, filesize=11.9 K 2024-11-21T00:29:13,631 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 21e23f12556a7b945d55cc2f3dac60b0 in 1070ms, sequenceid=168, compaction requested=false 2024-11-21T00:29:13,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:13,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:13,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-21T00:29:13,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-21T00:29:13,634 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-21T00:29:13,634 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1880 sec 2024-11-21T00:29:13,635 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 2.1920 sec 2024-11-21T00:29:13,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-21T00:29:13,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:13,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:13,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:13,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:13,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:13,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:13,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/9b931e20b52e41b88cc0e77c8e73b5a6 is 50, key is test_row_0/A:col10/1732148952674/Put/seqid=0 2024-11-21T00:29:13,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149013836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149013844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149013846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742327_1503 (size=14541) 2024-11-21T00:29:13,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149013949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149013958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:13,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:13,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149013961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:14,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:14,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149014166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:14,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:14,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149014171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:14,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:14,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149014171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:14,264 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/9b931e20b52e41b88cc0e77c8e73b5a6 2024-11-21T00:29:14,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/0cabcc9e5d3b439bac9c0b3f9580e05c is 50, key is test_row_0/B:col10/1732148952674/Put/seqid=0 2024-11-21T00:29:14,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742328_1504 (size=12151) 2024-11-21T00:29:14,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:14,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149014484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:14,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:14,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149014507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:14,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:14,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149014507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:14,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/0cabcc9e5d3b439bac9c0b3f9580e05c 2024-11-21T00:29:14,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/1a2ed17b187f4258984fb6bdb755a6a5 is 50, key is test_row_0/C:col10/1732148952674/Put/seqid=0 2024-11-21T00:29:14,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742329_1505 (size=12151) 2024-11-21T00:29:14,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/1a2ed17b187f4258984fb6bdb755a6a5 2024-11-21T00:29:14,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/9b931e20b52e41b88cc0e77c8e73b5a6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/9b931e20b52e41b88cc0e77c8e73b5a6 2024-11-21T00:29:14,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/9b931e20b52e41b88cc0e77c8e73b5a6, entries=200, sequenceid=197, filesize=14.2 K 2024-11-21T00:29:14,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/0cabcc9e5d3b439bac9c0b3f9580e05c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0cabcc9e5d3b439bac9c0b3f9580e05c 2024-11-21T00:29:14,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0cabcc9e5d3b439bac9c0b3f9580e05c, entries=150, sequenceid=197, filesize=11.9 K 2024-11-21T00:29:14,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/1a2ed17b187f4258984fb6bdb755a6a5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1a2ed17b187f4258984fb6bdb755a6a5 2024-11-21T00:29:14,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1a2ed17b187f4258984fb6bdb755a6a5, entries=150, sequenceid=197, filesize=11.9 K 2024-11-21T00:29:14,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 21e23f12556a7b945d55cc2f3dac60b0 in 1073ms, sequenceid=197, compaction requested=true 2024-11-21T00:29:14,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:14,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:14,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:14,883 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:14,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:14,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:14,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:14,883 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:14,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:14,888 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:14,888 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41575 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:14,888 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:14,888 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:14,888 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:14,888 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/34c6583b3f554379a7b9d17ce3577540, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/279e9dbaae0c4cba84762baca3505a32, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/9b931e20b52e41b88cc0e77c8e73b5a6] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=40.6 K 2024-11-21T00:29:14,888 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:14,888 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ff2b1fd15a34431a42e39a4f67ffb19, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/c85d50e1931b48afb7d5ebb7aa3a7de9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0cabcc9e5d3b439bac9c0b3f9580e05c] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=35.9 K 2024-11-21T00:29:14,888 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34c6583b3f554379a7b9d17ce3577540, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732148951016 2024-11-21T00:29:14,889 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ff2b1fd15a34431a42e39a4f67ffb19, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732148951016 2024-11-21T00:29:14,889 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 279e9dbaae0c4cba84762baca3505a32, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732148951372 2024-11-21T00:29:14,895 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c85d50e1931b48afb7d5ebb7aa3a7de9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732148951372 2024-11-21T00:29:14,895 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b931e20b52e41b88cc0e77c8e73b5a6, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732148952636 2024-11-21T00:29:14,895 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cabcc9e5d3b439bac9c0b3f9580e05c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732148952636 2024-11-21T00:29:14,929 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#426 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:14,929 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/7573134e9ed048228b42574ea3ff4906 is 50, key is test_row_0/A:col10/1732148952674/Put/seqid=0 2024-11-21T00:29:14,942 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#427 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:14,942 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/0d24dbf58b4f4cfda7b78d429591a930 is 50, key is test_row_0/B:col10/1732148952674/Put/seqid=0 2024-11-21T00:29:15,313 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_407452427_22 at /127.0.0.1:60544 [Receiving block BP-364493626-172.17.0.2-1732148814546:blk_1073741833_1009] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 311ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552/dfs/data/data1/, blockId=1073741833, seqno=1753 2024-11-21T00:29:15,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742330_1506 (size=12595) 2024-11-21T00:29:15,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742331_1507 (size=12595) 2024-11-21T00:29:15,316 INFO [AsyncFSWAL-0-hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f-prefix:0e7930017ff8,37961,1732148819586 {}] wal.AbstractFSWAL(1183): Slow sync cost: 314 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39227,DS-a61f25bc-f3dc-47fa-8c68-22e3217fe1b3,DISK]] 2024-11-21T00:29:15,316 INFO [AsyncFSWAL-0-hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f-prefix:0e7930017ff8,37961,1732148819586 {}] wal.AbstractFSWAL(1183): Slow sync cost: 293 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39227,DS-a61f25bc-f3dc-47fa-8c68-22e3217fe1b3,DISK]] 2024-11-21T00:29:15,316 INFO [AsyncFSWAL-0-hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f-prefix:0e7930017ff8,37961,1732148819586 {}] wal.AbstractFSWAL(1183): Slow sync cost: 284 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39227,DS-a61f25bc-f3dc-47fa-8c68-22e3217fe1b3,DISK]] 2024-11-21T00:29:15,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:15,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:29:15,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:15,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:15,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:15,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:15,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:15,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:15,333 
DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/7573134e9ed048228b42574ea3ff4906 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7573134e9ed048228b42574ea3ff4906 2024-11-21T00:29:15,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/4b0a08646e324382840256322e291e93 is 50, key is test_row_0/A:col10/1732148955031/Put/seqid=0 2024-11-21T00:29:15,346 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into 7573134e9ed048228b42574ea3ff4906(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:15,346 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:15,346 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=13, startTime=1732148954883; duration=0sec 2024-11-21T00:29:15,346 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:15,346 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:15,346 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:15,349 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:15,349 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/C is initiating minor compaction (all files) 2024-11-21T00:29:15,349 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/C in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:15,349 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/7d35f64532f6451ba229efd5dd9af5d7, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/01ee0bf5994b4da594726e968c3532ac, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1a2ed17b187f4258984fb6bdb755a6a5] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=35.9 K 2024-11-21T00:29:15,350 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d35f64532f6451ba229efd5dd9af5d7, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732148951016 2024-11-21T00:29:15,350 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01ee0bf5994b4da594726e968c3532ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732148951372 2024-11-21T00:29:15,350 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a2ed17b187f4258984fb6bdb755a6a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732148952636 2024-11-21T00:29:15,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742332_1508 (size=16931) 2024-11-21T00:29:15,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/4b0a08646e324382840256322e291e93 2024-11-21T00:29:15,359 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#C#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:15,359 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/b2415a7450d544d9a10504e69349b3fe is 50, key is test_row_0/C:col10/1732148952674/Put/seqid=0 2024-11-21T00:29:15,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/dd1d876abcd94447ae622ecd79e483d0 is 50, key is test_row_0/B:col10/1732148955031/Put/seqid=0 2024-11-21T00:29:15,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742333_1509 (size=12595) 2024-11-21T00:29:15,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742334_1510 (size=12151) 2024-11-21T00:29:15,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/dd1d876abcd94447ae622ecd79e483d0 2024-11-21T00:29:15,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/72e4dbe5e8144899aa3fcfb6bfa67a7d is 50, key is test_row_0/C:col10/1732148955031/Put/seqid=0 2024-11-21T00:29:15,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742335_1511 (size=12151) 2024-11-21T00:29:15,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149015378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149015379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149015385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/72e4dbe5e8144899aa3fcfb6bfa67a7d 2024-11-21T00:29:15,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149015388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149015391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/4b0a08646e324382840256322e291e93 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/4b0a08646e324382840256322e291e93 2024-11-21T00:29:15,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/4b0a08646e324382840256322e291e93, entries=250, sequenceid=209, filesize=16.5 K 2024-11-21T00:29:15,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/dd1d876abcd94447ae622ecd79e483d0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/dd1d876abcd94447ae622ecd79e483d0 2024-11-21T00:29:15,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/dd1d876abcd94447ae622ecd79e483d0, entries=150, sequenceid=209, filesize=11.9 K 2024-11-21T00:29:15,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/72e4dbe5e8144899aa3fcfb6bfa67a7d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72e4dbe5e8144899aa3fcfb6bfa67a7d 2024-11-21T00:29:15,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72e4dbe5e8144899aa3fcfb6bfa67a7d, entries=150, sequenceid=209, filesize=11.9 K 2024-11-21T00:29:15,421 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 
KB/144270 for 21e23f12556a7b945d55cc2f3dac60b0 in 103ms, sequenceid=209, compaction requested=false 2024-11-21T00:29:15,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:15,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:15,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:29:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:15,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/66aed2d34f2f4ec5a99ceff074a4f979 is 50, key is test_row_0/A:col10/1732148955496/Put/seqid=0 2024-11-21T00:29:15,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149015506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149015506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149015507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149015507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149015508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742336_1512 (size=14541) 2024-11-21T00:29:15,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/66aed2d34f2f4ec5a99ceff074a4f979 2024-11-21T00:29:15,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/84f2af6fa127446c87f84e5abeebb8d3 is 50, key is test_row_0/B:col10/1732148955496/Put/seqid=0 2024-11-21T00:29:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-21T00:29:15,557 INFO [Thread-2083 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-21T00:29:15,560 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:15,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-21T00:29:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 
2024-11-21T00:29:15,566 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:15,566 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:15,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:15,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742337_1513 (size=12151) 2024-11-21T00:29:15,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149015616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149015617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149015619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149015620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149015620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-21T00:29:15,718 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-21T00:29:15,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:15,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:15,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:15,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:15,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:15,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:15,735 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/0d24dbf58b4f4cfda7b78d429591a930 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0d24dbf58b4f4cfda7b78d429591a930 2024-11-21T00:29:15,772 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into 0d24dbf58b4f4cfda7b78d429591a930(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:15,772 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:15,772 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=13, startTime=1732148954883; duration=0sec 2024-11-21T00:29:15,772 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:15,772 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:15,816 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/b2415a7450d544d9a10504e69349b3fe as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b2415a7450d544d9a10504e69349b3fe 2024-11-21T00:29:15,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149015822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,825 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/C of 21e23f12556a7b945d55cc2f3dac60b0 into b2415a7450d544d9a10504e69349b3fe(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:15,825 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:15,825 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/C, priority=13, startTime=1732148954883; duration=0sec 2024-11-21T00:29:15,825 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:15,825 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:15,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149015825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149015831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149015835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:15,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149015838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,874 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:15,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-21T00:29:15,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-21T00:29:15,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:15,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:15,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:15,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:15,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:15,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:15,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/84f2af6fa127446c87f84e5abeebb8d3 2024-11-21T00:29:15,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/d75f1e89906845858355a8c653ac8c4f is 50, key is test_row_0/C:col10/1732148955496/Put/seqid=0 2024-11-21T00:29:16,039 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-21T00:29:16,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:16,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:16,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:16,039 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:16,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:16,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:16,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742338_1514 (size=12151) 2024-11-21T00:29:16,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149016127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149016127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149016141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149016145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149016145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-21T00:29:16,192 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-21T00:29:16,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:16,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:16,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:16,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:16,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:16,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:16,352 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-21T00:29:16,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:16,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:16,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:16,353 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:16,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:16,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/d75f1e89906845858355a8c653ac8c4f 2024-11-21T00:29:16,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/66aed2d34f2f4ec5a99ceff074a4f979 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/66aed2d34f2f4ec5a99ceff074a4f979 2024-11-21T00:29:16,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/66aed2d34f2f4ec5a99ceff074a4f979, entries=200, sequenceid=237, filesize=14.2 K 2024-11-21T00:29:16,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/84f2af6fa127446c87f84e5abeebb8d3 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/84f2af6fa127446c87f84e5abeebb8d3 2024-11-21T00:29:16,474 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/84f2af6fa127446c87f84e5abeebb8d3, entries=150, sequenceid=237, filesize=11.9 K 2024-11-21T00:29:16,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/d75f1e89906845858355a8c653ac8c4f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/d75f1e89906845858355a8c653ac8c4f 2024-11-21T00:29:16,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/d75f1e89906845858355a8c653ac8c4f, entries=150, sequenceid=237, filesize=11.9 K 2024-11-21T00:29:16,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 21e23f12556a7b945d55cc2f3dac60b0 in 984ms, sequenceid=237, compaction requested=true 2024-11-21T00:29:16,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:16,480 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:16,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-21T00:29:16,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:16,481 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44067 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:16,481 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:16,482 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:16,482 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7573134e9ed048228b42574ea3ff4906, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/4b0a08646e324382840256322e291e93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/66aed2d34f2f4ec5a99ceff074a4f979] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=43.0 K 2024-11-21T00:29:16,482 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:16,482 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7573134e9ed048228b42574ea3ff4906, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732148952636 2024-11-21T00:29:16,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:16,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:16,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:16,483 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:16,483 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:16,483 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in 
TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:16,483 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0d24dbf58b4f4cfda7b78d429591a930, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/dd1d876abcd94447ae622ecd79e483d0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/84f2af6fa127446c87f84e5abeebb8d3] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=36.0 K 2024-11-21T00:29:16,483 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b0a08646e324382840256322e291e93, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732148953843 2024-11-21T00:29:16,483 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d24dbf58b4f4cfda7b78d429591a930, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732148952636 2024-11-21T00:29:16,483 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66aed2d34f2f4ec5a99ceff074a4f979, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732148955386 2024-11-21T00:29:16,484 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting dd1d876abcd94447ae622ecd79e483d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732148953844 2024-11-21T00:29:16,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:16,484 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 84f2af6fa127446c87f84e5abeebb8d3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732148955386 2024-11-21T00:29:16,501 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#435 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:16,502 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/7fc990ac0e884a8ca47cb8b14ad397e3 is 50, key is test_row_0/B:col10/1732148955496/Put/seqid=0 2024-11-21T00:29:16,503 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#436 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:16,503 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/804940a9ff0c43d4ad527ee5705c01c6 is 50, key is test_row_0/A:col10/1732148955496/Put/seqid=0 2024-11-21T00:29:16,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-21T00:29:16,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:16,507 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-21T00:29:16,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:16,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:16,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:16,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:16,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:16,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:16,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/3e939841ce4f41ec95dbe904636b95e6 is 50, key is test_row_1/A:col10/1732148955507/Put/seqid=0 2024-11-21T00:29:16,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742339_1515 (size=12697) 2024-11-21T00:29:16,533 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/7fc990ac0e884a8ca47cb8b14ad397e3 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/7fc990ac0e884a8ca47cb8b14ad397e3 2024-11-21T00:29:16,540 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into 7fc990ac0e884a8ca47cb8b14ad397e3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:16,540 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:16,540 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=13, startTime=1732148956482; duration=0sec 2024-11-21T00:29:16,540 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:16,540 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:16,540 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:16,543 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:16,543 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/C is initiating minor compaction (all files) 2024-11-21T00:29:16,543 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/C in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:16,543 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b2415a7450d544d9a10504e69349b3fe, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72e4dbe5e8144899aa3fcfb6bfa67a7d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/d75f1e89906845858355a8c653ac8c4f] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=36.0 K 2024-11-21T00:29:16,544 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b2415a7450d544d9a10504e69349b3fe, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732148952636 2024-11-21T00:29:16,544 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 72e4dbe5e8144899aa3fcfb6bfa67a7d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732148953844 2024-11-21T00:29:16,544 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting d75f1e89906845858355a8c653ac8c4f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732148955386 2024-11-21T00:29:16,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742340_1516 (size=12697) 2024-11-21T00:29:16,574 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#C#compaction#438 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:16,574 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/72d5cd8e3fa54169b94c247034a64b35 is 50, key is test_row_0/C:col10/1732148955496/Put/seqid=0 2024-11-21T00:29:16,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742341_1517 (size=9757) 2024-11-21T00:29:16,588 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/3e939841ce4f41ec95dbe904636b95e6 2024-11-21T00:29:16,595 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/804940a9ff0c43d4ad527ee5705c01c6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/804940a9ff0c43d4ad527ee5705c01c6 2024-11-21T00:29:16,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742342_1518 (size=12697) 2024-11-21T00:29:16,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/5ef94a752b424f2d9055b0c20e747e28 is 50, key is test_row_1/B:col10/1732148955507/Put/seqid=0 2024-11-21T00:29:16,612 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/72d5cd8e3fa54169b94c247034a64b35 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72d5cd8e3fa54169b94c247034a64b35 2024-11-21T00:29:16,615 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into 804940a9ff0c43d4ad527ee5705c01c6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:16,615 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:16,615 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=13, startTime=1732148956480; duration=0sec 2024-11-21T00:29:16,615 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:16,615 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:16,627 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/C of 21e23f12556a7b945d55cc2f3dac60b0 into 72d5cd8e3fa54169b94c247034a64b35(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:16,627 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:16,627 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/C, priority=13, startTime=1732148956482; duration=0sec 2024-11-21T00:29:16,627 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:16,627 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:16,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742343_1519 (size=9757) 2024-11-21T00:29:16,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
as already flushing 2024-11-21T00:29:16,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:16,646 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/5ef94a752b424f2d9055b0c20e747e28 2024-11-21T00:29:16,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/b0bcd5ce21cc4650945fd96b448f57c9 is 50, key is test_row_1/C:col10/1732148955507/Put/seqid=0 2024-11-21T00:29:16,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-21T00:29:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742344_1520 (size=9757) 2024-11-21T00:29:16,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149016700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149016701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149016706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149016717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149016720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149016819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149016819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149016819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149016833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:16,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:16,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149016833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149017025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149017025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149017032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149017043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149017045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,110 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/b0bcd5ce21cc4650945fd96b448f57c9 2024-11-21T00:29:17,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/3e939841ce4f41ec95dbe904636b95e6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/3e939841ce4f41ec95dbe904636b95e6 2024-11-21T00:29:17,141 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/3e939841ce4f41ec95dbe904636b95e6, entries=100, sequenceid=248, filesize=9.5 K 2024-11-21T00:29:17,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/5ef94a752b424f2d9055b0c20e747e28 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5ef94a752b424f2d9055b0c20e747e28 2024-11-21T00:29:17,152 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5ef94a752b424f2d9055b0c20e747e28, entries=100, sequenceid=248, filesize=9.5 K 2024-11-21T00:29:17,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/b0bcd5ce21cc4650945fd96b448f57c9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b0bcd5ce21cc4650945fd96b448f57c9 2024-11-21T00:29:17,172 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b0bcd5ce21cc4650945fd96b448f57c9, entries=100, sequenceid=248, filesize=9.5 K 2024-11-21T00:29:17,172 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 21e23f12556a7b945d55cc2f3dac60b0 in 665ms, sequenceid=248, compaction requested=false 2024-11-21T00:29:17,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:17,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:17,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-21T00:29:17,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-21T00:29:17,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-21T00:29:17,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6080 sec 2024-11-21T00:29:17,176 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.6150 sec 2024-11-21T00:29:17,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:17,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-21T00:29:17,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:17,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:17,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:17,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:17,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:17,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:17,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/ada733c79b3d4cbcab30f0765072b244 is 50, key is test_row_0/A:col10/1732148956690/Put/seqid=0 2024-11-21T00:29:17,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149017336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149017336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149017339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149017351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149017353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742345_1521 (size=14741) 2024-11-21T00:29:17,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149017445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149017445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149017652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149017652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-21T00:29:17,688 INFO [Thread-2083 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-21T00:29:17,689 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:17,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-21T00:29:17,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-21T00:29:17,690 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:17,691 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:17,691 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:17,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/ada733c79b3d4cbcab30f0765072b244 2024-11-21T00:29:17,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/8c2002237c614a70a6dbce81aa443ee9 is 50, key is test_row_0/B:col10/1732148956690/Put/seqid=0 2024-11-21T00:29:17,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742346_1522 (size=12301) 
2024-11-21T00:29:17,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/8c2002237c614a70a6dbce81aa443ee9 2024-11-21T00:29:17,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/fb927b93660243ddb187fcf492768025 is 50, key is test_row_0/C:col10/1732148956690/Put/seqid=0 2024-11-21T00:29:17,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-21T00:29:17,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742347_1523 (size=12301) 2024-11-21T00:29:17,842 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,842 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-21T00:29:17,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:17,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:17,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:17,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:17,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:17,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149017849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149017859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149017863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149017956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:17,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149017957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-21T00:29:17,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:17,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-21T00:29:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:17,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:17,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:18,158 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-21T00:29:18,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:18,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:18,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:18,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:18,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:18,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
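[Annotation, not part of the captured log] The master keeps re-dispatching FlushRegionProcedure pid=139 because the region server answers "NOT flushing ... as already flushing" and reports the IOException back; once the in-flight memstore flush finishes, the retry succeeds later in this section. Below is a minimal illustrative sketch, assuming an hbase-site.xml on the classpath that points at this cluster, of how a TestAcidGuarantees-style caller would request the table flush that the master turns into the FlushTableProcedure/FlushRegionProcedure pair (pid=138/139) seen here; it is not the test's actual code.

// Illustrative sketch only -- assumes cluster configuration is available on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush request: the master schedules a flush procedure per region;
      // a region that is already flushing rejects the call and the master retries,
      // exactly as logged above for pid=139.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}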
2024-11-21T00:29:18,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/fb927b93660243ddb187fcf492768025 2024-11-21T00:29:18,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/ada733c79b3d4cbcab30f0765072b244 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ada733c79b3d4cbcab30f0765072b244 2024-11-21T00:29:18,214 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ada733c79b3d4cbcab30f0765072b244, entries=200, sequenceid=280, filesize=14.4 K 2024-11-21T00:29:18,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/8c2002237c614a70a6dbce81aa443ee9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8c2002237c614a70a6dbce81aa443ee9 2024-11-21T00:29:18,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8c2002237c614a70a6dbce81aa443ee9, entries=150, sequenceid=280, filesize=12.0 K 2024-11-21T00:29:18,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/fb927b93660243ddb187fcf492768025 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fb927b93660243ddb187fcf492768025 2024-11-21T00:29:18,240 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fb927b93660243ddb187fcf492768025, entries=150, sequenceid=280, filesize=12.0 K 2024-11-21T00:29:18,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 21e23f12556a7b945d55cc2f3dac60b0 in 911ms, sequenceid=280, compaction requested=true 2024-11-21T00:29:18,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:18,242 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:18,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-21T00:29:18,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:18,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:18,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:18,242 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:18,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:18,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:18,242 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:18,242 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:18,243 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:18,243 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:18,243 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/804940a9ff0c43d4ad527ee5705c01c6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/3e939841ce4f41ec95dbe904636b95e6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ada733c79b3d4cbcab30f0765072b244] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=36.3 K 2024-11-21T00:29:18,243 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:18,243 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
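[Annotation, not part of the captured log] The selection figures above ("3 eligible, 16 blocking") and the throughput line ("total limit is 50.00 MB/second") follow from store-file and compaction-throughput settings. The sketch below lists the standard HBase configuration keys involved; the concrete values shown are the usual defaults and are an assumption about this test setup, not something stated in the log.

// Illustrative sketch only -- values are assumed defaults matching the numbers logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  // Builds a Configuration carrying the compaction knobs reflected in the log above.
  static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);       // minimum eligible files for a minor compaction ("3 eligible")
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // per-store file count above which updates block ("16 blocking")
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound",
        50L * 1024 * 1024);                              // 50 MB/s floor enforced by PressureAwareThroughputController
    return conf;
  }
}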
2024-11-21T00:29:18,243 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/7fc990ac0e884a8ca47cb8b14ad397e3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5ef94a752b424f2d9055b0c20e747e28, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8c2002237c614a70a6dbce81aa443ee9] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=33.9 K 2024-11-21T00:29:18,243 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 804940a9ff0c43d4ad527ee5705c01c6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732148955386 2024-11-21T00:29:18,243 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fc990ac0e884a8ca47cb8b14ad397e3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732148955386 2024-11-21T00:29:18,243 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e939841ce4f41ec95dbe904636b95e6, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732148955503 2024-11-21T00:29:18,243 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ef94a752b424f2d9055b0c20e747e28, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732148955503 2024-11-21T00:29:18,243 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting ada733c79b3d4cbcab30f0765072b244, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732148956690 2024-11-21T00:29:18,243 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c2002237c614a70a6dbce81aa443ee9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732148956690 2024-11-21T00:29:18,260 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#444 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:18,260 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#445 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:18,261 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/76ea504f33f14abf80569322014cd2df is 50, key is test_row_0/B:col10/1732148956690/Put/seqid=0 2024-11-21T00:29:18,261 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/0efde73accd14a678c02f092bfbc9d93 is 50, key is test_row_0/A:col10/1732148956690/Put/seqid=0 2024-11-21T00:29:18,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742348_1524 (size=12949) 2024-11-21T00:29:18,272 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/76ea504f33f14abf80569322014cd2df as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/76ea504f33f14abf80569322014cd2df 2024-11-21T00:29:18,277 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into 76ea504f33f14abf80569322014cd2df(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:18,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742349_1525 (size=12949) 2024-11-21T00:29:18,277 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:18,277 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=13, startTime=1732148958242; duration=0sec 2024-11-21T00:29:18,277 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:18,277 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:18,277 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:18,284 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:18,284 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/C is initiating minor compaction (all files) 2024-11-21T00:29:18,284 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/C in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:18,284 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72d5cd8e3fa54169b94c247034a64b35, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b0bcd5ce21cc4650945fd96b448f57c9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fb927b93660243ddb187fcf492768025] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=33.9 K 2024-11-21T00:29:18,285 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 72d5cd8e3fa54169b94c247034a64b35, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732148955386 2024-11-21T00:29:18,285 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b0bcd5ce21cc4650945fd96b448f57c9, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732148955503 2024-11-21T00:29:18,285 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting fb927b93660243ddb187fcf492768025, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732148956690 2024-11-21T00:29:18,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-21T00:29:18,294 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#C#compaction#446 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:18,295 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/3b6b5a00f0384e89ae78fcc23c044565 is 50, key is test_row_0/C:col10/1732148956690/Put/seqid=0 2024-11-21T00:29:18,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742350_1526 (size=12949) 2024-11-21T00:29:18,303 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/3b6b5a00f0384e89ae78fcc23c044565 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/3b6b5a00f0384e89ae78fcc23c044565 2024-11-21T00:29:18,310 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-21T00:29:18,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:18,311 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-21T00:29:18,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:18,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:18,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:18,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:18,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:18,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:18,318 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/C of 21e23f12556a7b945d55cc2f3dac60b0 into 3b6b5a00f0384e89ae78fcc23c044565(size=12.6 
K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:18,318 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:18,318 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/C, priority=13, startTime=1732148958242; duration=0sec 2024-11-21T00:29:18,318 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:18,318 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:18,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/786c8fa09d4e40e68d81950c88f2cc82 is 50, key is test_row_0/A:col10/1732148957333/Put/seqid=0 2024-11-21T00:29:18,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742351_1527 (size=12301) 2024-11-21T00:29:18,352 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/786c8fa09d4e40e68d81950c88f2cc82 2024-11-21T00:29:18,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/8b9d5280c8d34bd28bbe7119b88e1891 is 50, key is test_row_0/B:col10/1732148957333/Put/seqid=0 2024-11-21T00:29:18,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742352_1528 (size=12301) 2024-11-21T00:29:18,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:18,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:18,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:18,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149018576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:18,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149018577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,686 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/0efde73accd14a678c02f092bfbc9d93 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0efde73accd14a678c02f092bfbc9d93 2024-11-21T00:29:18,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:18,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149018686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,692 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into 0efde73accd14a678c02f092bfbc9d93(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:18,692 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:18,692 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=13, startTime=1732148958241; duration=0sec 2024-11-21T00:29:18,692 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:18,692 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:18,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149018695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-21T00:29:18,806 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/8b9d5280c8d34bd28bbe7119b88e1891 2024-11-21T00:29:18,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/26b791f86dc943ebab60e5e2f82fbca0 is 50, key is test_row_0/C:col10/1732148957333/Put/seqid=0 2024-11-21T00:29:18,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742353_1529 (size=12301) 2024-11-21T00:29:18,852 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/26b791f86dc943ebab60e5e2f82fbca0 2024-11-21T00:29:18,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/786c8fa09d4e40e68d81950c88f2cc82 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/786c8fa09d4e40e68d81950c88f2cc82 2024-11-21T00:29:18,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:18,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149018857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,860 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/786c8fa09d4e40e68d81950c88f2cc82, entries=150, sequenceid=288, filesize=12.0 K 2024-11-21T00:29:18,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/8b9d5280c8d34bd28bbe7119b88e1891 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8b9d5280c8d34bd28bbe7119b88e1891 2024-11-21T00:29:18,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:18,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149018866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,873 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8b9d5280c8d34bd28bbe7119b88e1891, entries=150, sequenceid=288, filesize=12.0 K 2024-11-21T00:29:18,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/26b791f86dc943ebab60e5e2f82fbca0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/26b791f86dc943ebab60e5e2f82fbca0 2024-11-21T00:29:18,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:18,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149018877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,880 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/26b791f86dc943ebab60e5e2f82fbca0, entries=150, sequenceid=288, filesize=12.0 K 2024-11-21T00:29:18,881 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 21e23f12556a7b945d55cc2f3dac60b0 in 569ms, sequenceid=288, compaction requested=false 2024-11-21T00:29:18,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:18,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
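[Annotation, not part of the captured log] The repeated RegionTooBusyException warnings ("Over memstore limit=512.0 K") come from writers hitting the region's blocking memstore size, which is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the HBase client retries such calls on its own. Below is an illustrative writer-side sketch, under those assumptions, using the same table, family, row, and qualifier names that appear in this log; the retry settings shown are example values, not the test's configuration.

// Illustrative sketch only -- retry values are examples; table/family/row names taken from the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15); // how many times the client retries retriable exceptions
    conf.setLong("hbase.client.pause", 100);        // base backoff in milliseconds between retries
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region is over its memstore limit the server throws RegionTooBusyException;
      // the client backs off and retries until the flush logged above frees memstore space.
      table.put(put);
    }
  }
}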
2024-11-21T00:29:18,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-21T00:29:18,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-21T00:29:18,883 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-21T00:29:18,883 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1910 sec 2024-11-21T00:29:18,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.1950 sec 2024-11-21T00:29:18,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:18,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-21T00:29:18,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:18,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:18,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:18,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:18,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:18,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:18,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/058536168d9b48d8b6f49aa0c9d996b8 is 50, key is test_row_0/A:col10/1732148958551/Put/seqid=0 2024-11-21T00:29:18,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:18,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149018909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:18,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149018910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:18,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742354_1530 (size=14741) 2024-11-21T00:29:19,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:19,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149019012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:19,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149019013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:19,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149019217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:19,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149019221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/058536168d9b48d8b6f49aa0c9d996b8 2024-11-21T00:29:19,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/4ef90d41881c4b27834bcb5b5df3c1e2 is 50, key is test_row_0/B:col10/1732148958551/Put/seqid=0 2024-11-21T00:29:19,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742355_1531 (size=12301) 2024-11-21T00:29:19,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/4ef90d41881c4b27834bcb5b5df3c1e2 2024-11-21T00:29:19,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/f0ad2fd609464b5fbf53183481ec0c83 is 50, key is test_row_0/C:col10/1732148958551/Put/seqid=0 2024-11-21T00:29:19,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742356_1532 (size=12301) 2024-11-21T00:29:19,431 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/f0ad2fd609464b5fbf53183481ec0c83 2024-11-21T00:29:19,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/058536168d9b48d8b6f49aa0c9d996b8 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/058536168d9b48d8b6f49aa0c9d996b8 2024-11-21T00:29:19,440 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/058536168d9b48d8b6f49aa0c9d996b8, entries=200, sequenceid=320, filesize=14.4 K 2024-11-21T00:29:19,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/4ef90d41881c4b27834bcb5b5df3c1e2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ef90d41881c4b27834bcb5b5df3c1e2 2024-11-21T00:29:19,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ef90d41881c4b27834bcb5b5df3c1e2, entries=150, sequenceid=320, filesize=12.0 K 2024-11-21T00:29:19,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/f0ad2fd609464b5fbf53183481ec0c83 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/f0ad2fd609464b5fbf53183481ec0c83 2024-11-21T00:29:19,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/f0ad2fd609464b5fbf53183481ec0c83, entries=150, sequenceid=320, filesize=12.0 K 2024-11-21T00:29:19,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for 21e23f12556a7b945d55cc2f3dac60b0 in 613ms, sequenceid=320, compaction requested=true 2024-11-21T00:29:19,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:19,508 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:19,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:19,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:19,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:19,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:19,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:19,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:29:19,509 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:19,509 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:19,509 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:19,509 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:19,510 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0efde73accd14a678c02f092bfbc9d93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/786c8fa09d4e40e68d81950c88f2cc82, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/058536168d9b48d8b6f49aa0c9d996b8] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=39.1 K 2024-11-21T00:29:19,510 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0efde73accd14a678c02f092bfbc9d93, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732148956690 2024-11-21T00:29:19,510 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:19,510 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 786c8fa09d4e40e68d81950c88f2cc82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148957332 2024-11-21T00:29:19,510 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:19,510 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:19,510 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/76ea504f33f14abf80569322014cd2df, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8b9d5280c8d34bd28bbe7119b88e1891, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ef90d41881c4b27834bcb5b5df3c1e2] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=36.7 K 2024-11-21T00:29:19,514 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 058536168d9b48d8b6f49aa0c9d996b8, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732148958551 2024-11-21T00:29:19,515 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76ea504f33f14abf80569322014cd2df, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732148956690 2024-11-21T00:29:19,516 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b9d5280c8d34bd28bbe7119b88e1891, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148957332 2024-11-21T00:29:19,519 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ef90d41881c4b27834bcb5b5df3c1e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732148958551 2024-11-21T00:29:19,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:19,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:29:19,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:19,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:19,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:19,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:19,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:19,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:19,552 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#453 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:19,553 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/e5071d75e4a84bd8aabde567e292af02 is 50, key is test_row_0/A:col10/1732148958551/Put/seqid=0 2024-11-21T00:29:19,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/de46ac1f17024742bfe38df7440f7239 is 50, key is test_row_0/A:col10/1732148959538/Put/seqid=0 2024-11-21T00:29:19,554 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#454 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:19,555 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/8f607a49378c45978535a539bfeb0d83 is 50, key is test_row_0/B:col10/1732148958551/Put/seqid=0 2024-11-21T00:29:19,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742357_1533 (size=13051) 2024-11-21T00:29:19,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742359_1535 (size=13051) 2024-11-21T00:29:19,582 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/e5071d75e4a84bd8aabde567e292af02 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e5071d75e4a84bd8aabde567e292af02 2024-11-21T00:29:19,586 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into e5071d75e4a84bd8aabde567e292af02(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:19,586 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:19,586 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=13, startTime=1732148959507; duration=0sec 2024-11-21T00:29:19,586 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:19,586 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:19,586 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:19,598 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:19,598 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/C is initiating minor compaction (all files) 2024-11-21T00:29:19,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742358_1534 (size=17181) 2024-11-21T00:29:19,598 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/C in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:19,599 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/3b6b5a00f0384e89ae78fcc23c044565, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/26b791f86dc943ebab60e5e2f82fbca0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/f0ad2fd609464b5fbf53183481ec0c83] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=36.7 K 2024-11-21T00:29:19,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/de46ac1f17024742bfe38df7440f7239 2024-11-21T00:29:19,599 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b6b5a00f0384e89ae78fcc23c044565, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732148956690 2024-11-21T00:29:19,600 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 26b791f86dc943ebab60e5e2f82fbca0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148957332 2024-11-21T00:29:19,600 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f0ad2fd609464b5fbf53183481ec0c83, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732148958551 2024-11-21T00:29:19,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/488b5817991647debd43ced672cc3870 is 50, key is test_row_0/B:col10/1732148959538/Put/seqid=0 2024-11-21T00:29:19,626 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#C#compaction#457 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:19,626 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/1e8d597d129143aca29bf7524ae684f7 is 50, key is test_row_0/C:col10/1732148958551/Put/seqid=0 2024-11-21T00:29:19,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:19,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149019627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:19,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149019629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742360_1536 (size=12301) 2024-11-21T00:29:19,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742361_1537 (size=13051) 2024-11-21T00:29:19,687 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/1e8d597d129143aca29bf7524ae684f7 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1e8d597d129143aca29bf7524ae684f7 2024-11-21T00:29:19,694 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/C of 21e23f12556a7b945d55cc2f3dac60b0 into 1e8d597d129143aca29bf7524ae684f7(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:19,694 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:19,694 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/C, priority=13, startTime=1732148959508; duration=0sec 2024-11-21T00:29:19,694 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:19,694 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:19,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:19,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149019738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:19,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149019739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-21T00:29:19,796 INFO [Thread-2083 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-21T00:29:19,798 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:19,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-21T00:29:19,800 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:19,800 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:19,800 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:19,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-21T00:29:19,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-21T00:29:19,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:19,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149019943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149019944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,955 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:19,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-21T00:29:19,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:19,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:19,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:19,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:19,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:19,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:19,987 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/8f607a49378c45978535a539bfeb0d83 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8f607a49378c45978535a539bfeb0d83 2024-11-21T00:29:19,996 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into 8f607a49378c45978535a539bfeb0d83(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:19,996 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:19,996 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=13, startTime=1732148959508; duration=0sec 2024-11-21T00:29:19,996 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:19,996 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:20,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/488b5817991647debd43ced672cc3870 2024-11-21T00:29:20,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/80120b7f08a844fbab937cd646f71fce is 50, key is test_row_0/C:col10/1732148959538/Put/seqid=0 2024-11-21T00:29:20,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742362_1538 (size=12301) 2024-11-21T00:29:20,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-21T00:29:20,110 DEBUG 
[RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-21T00:29:20,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:20,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:20,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:20,111 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:20,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:20,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:20,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:20,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149020247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:20,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149020257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,263 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-21T00:29:20,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:20,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:20,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:20,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:20,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:20,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:20,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-21T00:29:20,424 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-21T00:29:20,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:20,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:20,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:20,427 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:20,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:20,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:20,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/80120b7f08a844fbab937cd646f71fce 2024-11-21T00:29:20,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/de46ac1f17024742bfe38df7440f7239 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/de46ac1f17024742bfe38df7440f7239 2024-11-21T00:29:20,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/de46ac1f17024742bfe38df7440f7239, entries=250, sequenceid=332, filesize=16.8 K 2024-11-21T00:29:20,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/488b5817991647debd43ced672cc3870 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/488b5817991647debd43ced672cc3870 2024-11-21T00:29:20,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/488b5817991647debd43ced672cc3870, entries=150, sequenceid=332, filesize=12.0 K 2024-11-21T00:29:20,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/80120b7f08a844fbab937cd646f71fce as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/80120b7f08a844fbab937cd646f71fce 2024-11-21T00:29:20,528 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/80120b7f08a844fbab937cd646f71fce, entries=150, sequenceid=332, filesize=12.0 K 2024-11-21T00:29:20,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 21e23f12556a7b945d55cc2f3dac60b0 in 990ms, sequenceid=332, compaction requested=false 2024-11-21T00:29:20,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:20,579 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=141 2024-11-21T00:29:20,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:20,580 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-21T00:29:20,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:20,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:20,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:20,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:20,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:20,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:20,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/ee094dea5a10487bbd618a96bb81bc3d is 50, key is test_row_0/A:col10/1732148959627/Put/seqid=0 2024-11-21T00:29:20,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742363_1539 (size=12301) 2024-11-21T00:29:20,607 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/ee094dea5a10487bbd618a96bb81bc3d 2024-11-21T00:29:20,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/cb4723b133c9456f88a728b7cafbb70d is 50, key is test_row_0/B:col10/1732148959627/Put/seqid=0 2024-11-21T00:29:20,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742364_1540 (size=12301) 2024-11-21T00:29:20,635 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/cb4723b133c9456f88a728b7cafbb70d 2024-11-21T00:29:20,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/ce5e1be7c7ec4087babcc4491c8fb073 is 50, key is test_row_0/C:col10/1732148959627/Put/seqid=0 2024-11-21T00:29:20,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742365_1541 (size=12301) 2024-11-21T00:29:20,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:20,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:20,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:20,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149020786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:20,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149020786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:20,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149020861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,865 DEBUG [Thread-2081 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:20,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149020887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149020889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,895 DEBUG [Thread-2079 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:20,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149020891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,896 DEBUG [Thread-2077 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., hostname=0e7930017ff8,37961,1732148819586, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:20,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:20,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149020891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:20,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-21T00:29:21,064 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/ce5e1be7c7ec4087babcc4491c8fb073 2024-11-21T00:29:21,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/ee094dea5a10487bbd618a96bb81bc3d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ee094dea5a10487bbd618a96bb81bc3d 2024-11-21T00:29:21,087 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ee094dea5a10487bbd618a96bb81bc3d, entries=150, sequenceid=359, filesize=12.0 K 2024-11-21T00:29:21,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/cb4723b133c9456f88a728b7cafbb70d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/cb4723b133c9456f88a728b7cafbb70d 2024-11-21T00:29:21,094 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/cb4723b133c9456f88a728b7cafbb70d, entries=150, sequenceid=359, filesize=12.0 K 2024-11-21T00:29:21,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/ce5e1be7c7ec4087babcc4491c8fb073 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ce5e1be7c7ec4087babcc4491c8fb073 2024-11-21T00:29:21,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:21,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149021097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:21,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:21,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149021100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:21,108 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ce5e1be7c7ec4087babcc4491c8fb073, entries=150, sequenceid=359, filesize=12.0 K 2024-11-21T00:29:21,109 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 21e23f12556a7b945d55cc2f3dac60b0 in 529ms, sequenceid=359, compaction requested=true 2024-11-21T00:29:21,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:21,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:21,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-21T00:29:21,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-21T00:29:21,121 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-21T00:29:21,121 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3170 sec 2024-11-21T00:29:21,123 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.3240 sec 2024-11-21T00:29:21,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:21,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-21T00:29:21,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:21,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:21,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:21,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:21,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:21,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:21,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/0b7774d77389486a994bebf557e0353b is 50, key is test_row_0/A:col10/1732148961413/Put/seqid=0 2024-11-21T00:29:21,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742366_1542 (size=14741) 2024-11-21T00:29:21,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/0b7774d77389486a994bebf557e0353b 2024-11-21T00:29:21,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/31c206083e1a4cc39c0702c94580c993 is 50, key is test_row_0/B:col10/1732148961413/Put/seqid=0 2024-11-21T00:29:21,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to 
blk_1073742367_1543 (size=12301) 2024-11-21T00:29:21,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/31c206083e1a4cc39c0702c94580c993 2024-11-21T00:29:21,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/79cfed9c0ea447cca33e2b47a4b8ac1e is 50, key is test_row_0/C:col10/1732148961413/Put/seqid=0 2024-11-21T00:29:21,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:21,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149021528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:21,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:21,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149021528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:21,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742368_1544 (size=12301) 2024-11-21T00:29:21,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/79cfed9c0ea447cca33e2b47a4b8ac1e 2024-11-21T00:29:21,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/0b7774d77389486a994bebf557e0353b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0b7774d77389486a994bebf557e0353b 2024-11-21T00:29:21,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0b7774d77389486a994bebf557e0353b, entries=200, sequenceid=374, filesize=14.4 K 2024-11-21T00:29:21,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/31c206083e1a4cc39c0702c94580c993 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/31c206083e1a4cc39c0702c94580c993 2024-11-21T00:29:21,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/31c206083e1a4cc39c0702c94580c993, entries=150, sequenceid=374, filesize=12.0 K 2024-11-21T00:29:21,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/79cfed9c0ea447cca33e2b47a4b8ac1e as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/79cfed9c0ea447cca33e2b47a4b8ac1e 2024-11-21T00:29:21,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/79cfed9c0ea447cca33e2b47a4b8ac1e, entries=150, sequenceid=374, filesize=12.0 K 2024-11-21T00:29:21,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 21e23f12556a7b945d55cc2f3dac60b0 in 210ms, sequenceid=374, compaction requested=true 2024-11-21T00:29:21,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:21,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:21,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:21,623 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:21,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:21,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:21,623 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:21,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:21,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:21,635 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57274 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:21,635 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:21,635 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:21,635 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e5071d75e4a84bd8aabde567e292af02, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/de46ac1f17024742bfe38df7440f7239, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ee094dea5a10487bbd618a96bb81bc3d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0b7774d77389486a994bebf557e0353b] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=55.9 K 2024-11-21T00:29:21,636 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5071d75e4a84bd8aabde567e292af02, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732148958551 2024-11-21T00:29:21,636 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:21,636 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:21,636 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:21,636 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8f607a49378c45978535a539bfeb0d83, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/488b5817991647debd43ced672cc3870, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/cb4723b133c9456f88a728b7cafbb70d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/31c206083e1a4cc39c0702c94580c993] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=48.8 K 2024-11-21T00:29:21,637 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting de46ac1f17024742bfe38df7440f7239, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732148958907 2024-11-21T00:29:21,637 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f607a49378c45978535a539bfeb0d83, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732148958551 2024-11-21T00:29:21,637 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee094dea5a10487bbd618a96bb81bc3d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732148959615 2024-11-21T00:29:21,637 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 488b5817991647debd43ced672cc3870, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732148958907 2024-11-21T00:29:21,637 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b7774d77389486a994bebf557e0353b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732148960779 2024-11-21T00:29:21,638 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting cb4723b133c9456f88a728b7cafbb70d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732148959615 2024-11-21T00:29:21,638 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 31c206083e1a4cc39c0702c94580c993, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732148960779 2024-11-21T00:29:21,649 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#465 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:21,649 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/83ca168d0e484a1a99ddeb206ff7cbec is 50, key is test_row_0/B:col10/1732148961413/Put/seqid=0 2024-11-21T00:29:21,651 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-21T00:29:21,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:21,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:21,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:21,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:21,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:21,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:21,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:21,657 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#466 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:21,658 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/dea0dd6466c4454a94193a1d2579b887 is 50, key is test_row_0/A:col10/1732148961413/Put/seqid=0 2024-11-21T00:29:21,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/5f069001498d48ea8f4b9042cf1e77d8 is 50, key is test_row_0/A:col10/1732148961651/Put/seqid=0 2024-11-21T00:29:21,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742369_1545 (size=13187) 2024-11-21T00:29:21,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742370_1546 (size=13187) 2024-11-21T00:29:21,738 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/dea0dd6466c4454a94193a1d2579b887 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/dea0dd6466c4454a94193a1d2579b887 2024-11-21T00:29:21,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742371_1547 (size=17181) 2024-11-21T00:29:21,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/5f069001498d48ea8f4b9042cf1e77d8 2024-11-21T00:29:21,751 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into dea0dd6466c4454a94193a1d2579b887(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:21,751 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:21,751 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=12, startTime=1732148961623; duration=0sec 2024-11-21T00:29:21,751 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:21,751 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:21,751 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:21,753 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:21,753 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/C is initiating minor compaction (all files) 2024-11-21T00:29:21,753 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/C in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:21,754 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1e8d597d129143aca29bf7524ae684f7, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/80120b7f08a844fbab937cd646f71fce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ce5e1be7c7ec4087babcc4491c8fb073, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/79cfed9c0ea447cca33e2b47a4b8ac1e] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=48.8 K 2024-11-21T00:29:21,755 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e8d597d129143aca29bf7524ae684f7, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732148958551 2024-11-21T00:29:21,755 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80120b7f08a844fbab937cd646f71fce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732148958907 2024-11-21T00:29:21,755 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce5e1be7c7ec4087babcc4491c8fb073, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732148959615 2024-11-21T00:29:21,756 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79cfed9c0ea447cca33e2b47a4b8ac1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732148960779 2024-11-21T00:29:21,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:21,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149021742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:21,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/786f6482250f4887a4fae9f7d8a0d0ce is 50, key is test_row_0/B:col10/1732148961651/Put/seqid=0 2024-11-21T00:29:21,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:21,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149021758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:21,781 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#C#compaction#469 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:21,781 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/ea5e2222abf24fa0a54e086ff8d4b2b8 is 50, key is test_row_0/C:col10/1732148961413/Put/seqid=0 2024-11-21T00:29:21,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742372_1548 (size=12301) 2024-11-21T00:29:21,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/786f6482250f4887a4fae9f7d8a0d0ce 2024-11-21T00:29:21,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742373_1549 (size=13187) 2024-11-21T00:29:21,863 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/ea5e2222abf24fa0a54e086ff8d4b2b8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ea5e2222abf24fa0a54e086ff8d4b2b8 2024-11-21T00:29:21,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/34dded2a73d644f693d66e6710672adb is 50, key is test_row_0/C:col10/1732148961651/Put/seqid=0 2024-11-21T00:29:21,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:21,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149021861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:21,878 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/C of 21e23f12556a7b945d55cc2f3dac60b0 into ea5e2222abf24fa0a54e086ff8d4b2b8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:21,878 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:21,878 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/C, priority=12, startTime=1732148961623; duration=0sec 2024-11-21T00:29:21,878 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:21,878 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:21,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:21,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149021876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:21,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742374_1550 (size=12301) 2024-11-21T00:29:21,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-21T00:29:21,912 INFO [Thread-2083 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-21T00:29:21,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:21,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-21T00:29:21,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-21T00:29:21,914 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:21,915 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:21,915 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:22,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-21T00:29:22,066 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
0e7930017ff8,37961,1732148819586 2024-11-21T00:29:22,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-21T00:29:22,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:22,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:22,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:22,069 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:22,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:22,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:22,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:22,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149022079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:22,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:22,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149022090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:22,115 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/83ca168d0e484a1a99ddeb206ff7cbec as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/83ca168d0e484a1a99ddeb206ff7cbec 2024-11-21T00:29:22,120 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into 83ca168d0e484a1a99ddeb206ff7cbec(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:22,121 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:22,121 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=12, startTime=1732148961623; duration=0sec 2024-11-21T00:29:22,121 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:22,121 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:22,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-21T00:29:22,222 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:22,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-21T00:29:22,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:22,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing
2024-11-21T00:29:22,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.
2024-11-21T00:29:22,223 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143
java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:22,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143
java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:22,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=143
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:22,312 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/34dded2a73d644f693d66e6710672adb
2024-11-21T00:29:22,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/5f069001498d48ea8f4b9042cf1e77d8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/5f069001498d48ea8f4b9042cf1e77d8
2024-11-21T00:29:22,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/5f069001498d48ea8f4b9042cf1e77d8, entries=250, sequenceid=397, filesize=16.8 K
2024-11-21T00:29:22,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/786f6482250f4887a4fae9f7d8a0d0ce as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/786f6482250f4887a4fae9f7d8a0d0ce
2024-11-21T00:29:22,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/786f6482250f4887a4fae9f7d8a0d0ce, entries=150, sequenceid=397, filesize=12.0 K
2024-11-21T00:29:22,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/34dded2a73d644f693d66e6710672adb as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/34dded2a73d644f693d66e6710672adb
2024-11-21T00:29:22,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/34dded2a73d644f693d66e6710672adb, entries=150, sequenceid=397, filesize=12.0 K
2024-11-21T00:29:22,337 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 21e23f12556a7b945d55cc2f3dac60b0 in 686ms, sequenceid=397, compaction requested=false
2024-11-21T00:29:22,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0:
2024-11-21T00:29:22,378 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586
2024-11-21T00:29:22,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143
2024-11-21T00:29:22,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.
2024-11-21T00:29:22,379 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-11-21T00:29:22,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A
2024-11-21T00:29:22,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-21T00:29:22,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B
2024-11-21T00:29:22,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-21T00:29:22,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C
2024-11-21T00:29:22,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-21T00:29:22,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/7c591e9fc7b347288c12625cd4f47147 is 50, key is test_row_0/A:col10/1732148961741/Put/seqid=0
2024-11-21T00:29:22,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing
2024-11-21T00:29:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0
2024-11-21T00:29:22,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742375_1551 (size=12301)
2024-11-21T00:29:22,442 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/7c591e9fc7b347288c12625cd4f47147
2024-11-21T00:29:22,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/b934d0e9c0a4441da62c666539060469 is 50, key is test_row_0/B:col10/1732148961741/Put/seqid=0
2024-11-21T00:29:22,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742376_1552 (size=12301)
2024-11-21T00:29:22,499 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/b934d0e9c0a4441da62c666539060469
2024-11-21T00:29:22,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-11-21T00:29:22,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/81c627fd482a4f378c3ace7df72443d3 is 50, key is test_row_0/C:col10/1732148961741/Put/seqid=0
2024-11-21T00:29:22,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742377_1553 (size=12301)
2024-11-21T00:29:22,552 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/81c627fd482a4f378c3ace7df72443d3
2024-11-21T00:29:22,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/7c591e9fc7b347288c12625cd4f47147 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7c591e9fc7b347288c12625cd4f47147
2024-11-21T00:29:22,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:22,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149022536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:22,567 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7c591e9fc7b347288c12625cd4f47147, entries=150, sequenceid=413, filesize=12.0 K
2024-11-21T00:29:22,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/b934d0e9c0a4441da62c666539060469 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/b934d0e9c0a4441da62c666539060469
2024-11-21T00:29:22,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:22,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149022552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:22,582 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/b934d0e9c0a4441da62c666539060469, entries=150, sequenceid=413, filesize=12.0 K
2024-11-21T00:29:22,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/81c627fd482a4f378c3ace7df72443d3 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/81c627fd482a4f378c3ace7df72443d3
2024-11-21T00:29:22,604 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/81c627fd482a4f378c3ace7df72443d3, entries=150, sequenceid=413, filesize=12.0 K
2024-11-21T00:29:22,605 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 21e23f12556a7b945d55cc2f3dac60b0 in 226ms, sequenceid=413, compaction requested=true
2024-11-21T00:29:22,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0:
2024-11-21T00:29:22,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.
2024-11-21T00:29:22,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143
2024-11-21T00:29:22,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=143
2024-11-21T00:29:22,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142
2024-11-21T00:29:22,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 691 msec
2024-11-21T00:29:22,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 695 msec
2024-11-21T00:29:22,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0
2024-11-21T00:29:22,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-11-21T00:29:22,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A
2024-11-21T00:29:22,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-21T00:29:22,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B
2024-11-21T00:29:22,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-21T00:29:22,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C
2024-11-21T00:29:22,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-21T00:29:22,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/77838d6eaf0b48cbae12810f8fcdd828 is 50, key is test_row_0/A:col10/1732148962532/Put/seqid=0
2024-11-21T00:29:22,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742378_1554 (size=14741)
2024-11-21T00:29:22,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:22,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149022730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:22,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:22,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149022731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:22,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:22,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149022836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:22,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:22,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149022839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:23,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-11-21T00:29:23,021 INFO [Thread-2083 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed
2024-11-21T00:29:23,023 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-21T00:29:23,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees
2024-11-21T00:29:23,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-21T00:29:23,024 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-21T00:29:23,024 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-21T00:29:23,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-21T00:29:23,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:23,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149023046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:23,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:23,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149023048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:23,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/77838d6eaf0b48cbae12810f8fcdd828
2024-11-21T00:29:23,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-21T00:29:23,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/379c247731cb4a7883c4b9492f845112 is 50, key is test_row_0/B:col10/1732148962532/Put/seqid=0
2024-11-21T00:29:23,175 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586
2024-11-21T00:29:23,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145
2024-11-21T00:29:23,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.
2024-11-21T00:29:23,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing
2024-11-21T00:29:23,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.
2024-11-21T00:29:23,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145
java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:23,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145
java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:23,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=145
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:23,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742379_1555 (size=12301)
2024-11-21T00:29:23,327 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586
2024-11-21T00:29:23,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145
2024-11-21T00:29:23,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.
2024-11-21T00:29:23,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing
2024-11-21T00:29:23,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.
2024-11-21T00:29:23,328 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145
java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:23,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145
java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:23,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=145
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:23,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-21T00:29:23,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:23,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149023356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:23,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:23,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149023356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:23,481 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586
2024-11-21T00:29:23,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145
2024-11-21T00:29:23,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.
2024-11-21T00:29:23,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing
2024-11-21T00:29:23,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.
2024-11-21T00:29:23,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145
java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:23,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145
java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:29:23,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
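pid=145 above is a FlushRegionProcedure dispatched by the master as a child of the table flush pid=144; it keeps failing with "Unable to complete flush ... as already flushing" because the region is still flushing under its own memstore pressure, so the master re-dispatches it until it succeeds later in the log. A flush like this is normally requested through the Admin API; a minimal client-side sketch (whether this particular test triggers it this way is not shown in the log):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // In this build the master runs the request as the FlushTableProcedure /
            // FlushRegionProcedure pair seen as pid=144 / pid=145 in the log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}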
2024-11-21T00:29:23,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/379c247731cb4a7883c4b9492f845112 2024-11-21T00:29:23,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/4ff0120f1493467ca04d8eb04c4e58bf is 50, key is test_row_0/C:col10/1732148962532/Put/seqid=0 2024-11-21T00:29:23,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-21T00:29:23,643 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:23,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-21T00:29:23,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:23,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:23,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:23,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742380_1556 (size=12301) 2024-11-21T00:29:23,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:23,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/4ff0120f1493467ca04d8eb04c4e58bf 2024-11-21T00:29:23,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:23,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:23,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/77838d6eaf0b48cbae12810f8fcdd828 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/77838d6eaf0b48cbae12810f8fcdd828 2024-11-21T00:29:23,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/77838d6eaf0b48cbae12810f8fcdd828, entries=200, sequenceid=437, filesize=14.4 K 2024-11-21T00:29:23,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/379c247731cb4a7883c4b9492f845112 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/379c247731cb4a7883c4b9492f845112 2024-11-21T00:29:23,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/379c247731cb4a7883c4b9492f845112, entries=150, sequenceid=437, filesize=12.0 K 2024-11-21T00:29:23,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/4ff0120f1493467ca04d8eb04c4e58bf as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/4ff0120f1493467ca04d8eb04c4e58bf 2024-11-21T00:29:23,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/4ff0120f1493467ca04d8eb04c4e58bf, entries=150, sequenceid=437, filesize=12.0 K 2024-11-21T00:29:23,685 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 21e23f12556a7b945d55cc2f3dac60b0 in 1017ms, sequenceid=437, compaction requested=true 2024-11-21T00:29:23,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:23,686 DEBUG 
[RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:23,687 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:23,687 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:23,687 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:23,687 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/dea0dd6466c4454a94193a1d2579b887, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/5f069001498d48ea8f4b9042cf1e77d8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7c591e9fc7b347288c12625cd4f47147, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/77838d6eaf0b48cbae12810f8fcdd828] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=56.1 K 2024-11-21T00:29:23,688 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting dea0dd6466c4454a94193a1d2579b887, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732148960779 2024-11-21T00:29:23,688 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f069001498d48ea8f4b9042cf1e77d8, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732148961486 2024-11-21T00:29:23,689 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c591e9fc7b347288c12625cd4f47147, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732148961726 2024-11-21T00:29:23,689 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77838d6eaf0b48cbae12810f8fcdd828, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732148962511 2024-11-21T00:29:23,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:23,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:23,691 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 
compacting, 4 eligible, 16 blocking 2024-11-21T00:29:23,697 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:23,698 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:23,698 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:23,698 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/83ca168d0e484a1a99ddeb206ff7cbec, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/786f6482250f4887a4fae9f7d8a0d0ce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/b934d0e9c0a4441da62c666539060469, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/379c247731cb4a7883c4b9492f845112] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=48.9 K 2024-11-21T00:29:23,698 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 83ca168d0e484a1a99ddeb206ff7cbec, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732148960779 2024-11-21T00:29:23,699 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 786f6482250f4887a4fae9f7d8a0d0ce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732148961486 2024-11-21T00:29:23,700 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b934d0e9c0a4441da62c666539060469, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732148961726 2024-11-21T00:29:23,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:23,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:23,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:23,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:23,700 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 
379c247731cb4a7883c4b9492f845112, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732148962511 2024-11-21T00:29:23,716 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#477 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:23,716 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/23610525da0d4d6da8429dcb4fcc7fac is 50, key is test_row_0/A:col10/1732148962532/Put/seqid=0 2024-11-21T00:29:23,722 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#478 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:23,722 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/976bd550c3be4b18b543d965a7ca618e is 50, key is test_row_0/B:col10/1732148962532/Put/seqid=0 2024-11-21T00:29:23,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742381_1557 (size=13323) 2024-11-21T00:29:23,773 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/23610525da0d4d6da8429dcb4fcc7fac as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/23610525da0d4d6da8429dcb4fcc7fac 2024-11-21T00:29:23,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742382_1558 (size=13323) 2024-11-21T00:29:23,788 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into 23610525da0d4d6da8429dcb4fcc7fac(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
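The selection logged above ("Exploring compaction algorithm has selected 4 files of size 57410 ... 3 permutations with 3 in ratio") is the ratio-based minor-compaction check over a window of store files. A greatly simplified sketch of that ratio test, assuming the default compaction ratio of 1.2; the real ExploringCompactionPolicy also enforces min/max file counts, slides the window over all candidates, and handles off-peak ratios:

import java.util.List;

public class RatioSelectionSketch {
    /** Returns true if every file in the window is at most `ratio` times the
     *  combined size of the other files in the window. */
    static boolean withinRatio(List<Long> window, double ratio) {
        long total = 0;
        for (long size : window) {
            total += size;
        }
        for (long size : window) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate byte sizes of the A-store candidates in this log:
        // 12.9 K, 16.8 K, 12.0 K, 14.4 K (about 56.1 K in total).
        List<Long> candidate = List.of(13_209L, 17_203L, 12_288L, 14_746L);
        System.out.println(withinRatio(candidate, 1.2));
    }
}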
2024-11-21T00:29:23,788 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:23,788 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=12, startTime=1732148963685; duration=0sec 2024-11-21T00:29:23,788 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:23,788 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:23,788 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:23,791 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:23,791 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/C is initiating minor compaction (all files) 2024-11-21T00:29:23,791 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/C in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:23,791 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ea5e2222abf24fa0a54e086ff8d4b2b8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/34dded2a73d644f693d66e6710672adb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/81c627fd482a4f378c3ace7df72443d3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/4ff0120f1493467ca04d8eb04c4e58bf] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=48.9 K 2024-11-21T00:29:23,792 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea5e2222abf24fa0a54e086ff8d4b2b8, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732148960779 2024-11-21T00:29:23,792 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34dded2a73d644f693d66e6710672adb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732148961486 2024-11-21T00:29:23,792 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81c627fd482a4f378c3ace7df72443d3, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732148961726 2024-11-21T00:29:23,792 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ff0120f1493467ca04d8eb04c4e58bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732148962511 2024-11-21T00:29:23,800 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/976bd550c3be4b18b543d965a7ca618e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/976bd550c3be4b18b543d965a7ca618e 2024-11-21T00:29:23,800 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:23,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-21T00:29:23,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:23,801 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:29:23,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:23,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:23,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:23,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:23,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:23,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:23,810 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into 976bd550c3be4b18b543d965a7ca618e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
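The "Committing .tmp/... as ..." lines show how flush and compaction output becomes visible: files are first written under the region's .tmp directory and only published by a rename into the column-family directory. A sketch of that pattern with the Hadoop FileSystem API (illustration only, not the actual HRegionFileSystem code; paths are abbreviated and hypothetical):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitTmpFileSketch {
    static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dst = new Path(familyDir, tmpFile.getName());
        // On HDFS the rename is atomic, so readers see either the old file set or the
        // new one, never a half-written HFile.
        if (!fs.rename(tmpFile, dst)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/976bd550c3be4b18b543d965a7ca618e");
        Path family = new Path("/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B");
        commitStoreFile(fs, tmp, family);
    }
}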
2024-11-21T00:29:23,810 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:23,810 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=12, startTime=1732148963691; duration=0sec 2024-11-21T00:29:23,810 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:23,810 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:23,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/2a931d4df2df4ddfa20aec072625adf0 is 50, key is test_row_0/A:col10/1732148962729/Put/seqid=0 2024-11-21T00:29:23,825 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#C#compaction#480 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:23,826 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/52368d8e6d654a37a0b139126274307e is 50, key is test_row_0/C:col10/1732148962532/Put/seqid=0 2024-11-21T00:29:23,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742383_1559 (size=12301) 2024-11-21T00:29:23,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742384_1560 (size=13323) 2024-11-21T00:29:23,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:23,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:24,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:24,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149024013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:24,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:24,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149024014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:24,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:24,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149024124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:24,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-21T00:29:24,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:24,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149024127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:24,253 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/2a931d4df2df4ddfa20aec072625adf0 2024-11-21T00:29:24,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/a89ef10d09f449159a033eab815fc961 is 50, key is test_row_0/B:col10/1732148962729/Put/seqid=0 2024-11-21T00:29:24,275 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/52368d8e6d654a37a0b139126274307e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/52368d8e6d654a37a0b139126274307e 2024-11-21T00:29:24,282 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/C of 21e23f12556a7b945d55cc2f3dac60b0 into 52368d8e6d654a37a0b139126274307e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
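The Mutate calls rejected with RegionTooBusyException through this stretch are simply retried by the writers until the flushes above free memstore space; the stock HBase client already backs off and retries such failures on its own. A minimal sketch of handling it explicitly (assumes client retries are turned down so the exception surfaces to the caller, and that the table and put are prepared elsewhere):

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetrySketch {
    static void putWithBackoff(Table table, Put put, int maxAttempts) throws IOException, InterruptedException {
        long backoffMs = 100;
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (attempt >= maxAttempts) {
                    throw e;
                }
                Thread.sleep(backoffMs);                 // let flushes/compactions catch up
                backoffMs = Math.min(backoffMs * 2, 5_000);
            }
        }
    }
}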
2024-11-21T00:29:24,282 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:24,282 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/C, priority=12, startTime=1732148963700; duration=0sec 2024-11-21T00:29:24,282 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:24,282 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:24,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742385_1561 (size=12301) 2024-11-21T00:29:24,312 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/a89ef10d09f449159a033eab815fc961 2024-11-21T00:29:24,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/75b9a3d5a6f8406787d8007c5d98c9d5 is 50, key is test_row_0/C:col10/1732148962729/Put/seqid=0 2024-11-21T00:29:24,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:24,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149024329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:24,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149024335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:24,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742386_1562 (size=12301) 2024-11-21T00:29:24,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:24,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149024636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:24,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149024645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:24,773 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/75b9a3d5a6f8406787d8007c5d98c9d5 2024-11-21T00:29:24,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/2a931d4df2df4ddfa20aec072625adf0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2a931d4df2df4ddfa20aec072625adf0 2024-11-21T00:29:24,781 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2a931d4df2df4ddfa20aec072625adf0, entries=150, sequenceid=450, filesize=12.0 K 2024-11-21T00:29:24,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/a89ef10d09f449159a033eab815fc961 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/a89ef10d09f449159a033eab815fc961 2024-11-21T00:29:24,785 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/a89ef10d09f449159a033eab815fc961, entries=150, sequenceid=450, filesize=12.0 K 2024-11-21T00:29:24,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/75b9a3d5a6f8406787d8007c5d98c9d5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/75b9a3d5a6f8406787d8007c5d98c9d5 2024-11-21T00:29:24,790 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/75b9a3d5a6f8406787d8007c5d98c9d5, entries=150, sequenceid=450, filesize=12.0 K 2024-11-21T00:29:24,791 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 21e23f12556a7b945d55cc2f3dac60b0 in 990ms, sequenceid=450, compaction requested=false 2024-11-21T00:29:24,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:24,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:24,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-21T00:29:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-21T00:29:24,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-21T00:29:24,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7700 sec 2024-11-21T00:29:24,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.7720 sec 2024-11-21T00:29:24,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-21T00:29:24,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:24,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:24,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:24,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:24,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:24,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-21T00:29:24,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:24,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/29553f2d18fa4378883c30db52e86305 is 50, key is test_row_0/A:col10/1732148964909/Put/seqid=0 2024-11-21T00:29:24,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742387_1563 (size=14741) 2024-11-21T00:29:24,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/29553f2d18fa4378883c30db52e86305 2024-11-21T00:29:24,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/3e3e31d535fe4c2b9024be66a1fb9f6a is 50, key is test_row_0/B:col10/1732148964909/Put/seqid=0 2024-11-21T00:29:24,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742388_1564 (size=12301) 2024-11-21T00:29:24,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149024987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149024994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149024999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149025101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149025101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149025110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-21T00:29:25,133 INFO [Thread-2083 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-21T00:29:25,139 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:25,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59498 deadline: 1732149025139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-21T00:29:25,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-21T00:29:25,149 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:25,149 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:25,149 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:25,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59544 deadline: 1732149025153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-21T00:29:25,303 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-21T00:29:25,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:25,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:25,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:25,303 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:25,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:25,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:25,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149025307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149025307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149025319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,384 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/3e3e31d535fe4c2b9024be66a1fb9f6a 2024-11-21T00:29:25,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/630eb8b609234da784b61fe90e5a25a4 is 50, key is test_row_0/C:col10/1732148964909/Put/seqid=0 2024-11-21T00:29:25,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742389_1565 (size=12301) 2024-11-21T00:29:25,450 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/630eb8b609234da784b61fe90e5a25a4 2024-11-21T00:29:25,455 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-21T00:29:25,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:25,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. as already flushing 2024-11-21T00:29:25,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:25,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:25,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:25,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/29553f2d18fa4378883c30db52e86305 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/29553f2d18fa4378883c30db52e86305 2024-11-21T00:29:25,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:25,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/29553f2d18fa4378883c30db52e86305, entries=200, sequenceid=477, filesize=14.4 K 2024-11-21T00:29:25,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-21T00:29:25,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/3e3e31d535fe4c2b9024be66a1fb9f6a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3e3e31d535fe4c2b9024be66a1fb9f6a 2024-11-21T00:29:25,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3e3e31d535fe4c2b9024be66a1fb9f6a, entries=150, sequenceid=477, filesize=12.0 K 2024-11-21T00:29:25,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/630eb8b609234da784b61fe90e5a25a4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/630eb8b609234da784b61fe90e5a25a4 2024-11-21T00:29:25,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/630eb8b609234da784b61fe90e5a25a4, entries=150, sequenceid=477, filesize=12.0 K 2024-11-21T00:29:25,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 21e23f12556a7b945d55cc2f3dac60b0 in 574ms, sequenceid=477, compaction requested=true 2024-11-21T00:29:25,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:25,485 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:25,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:25,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:25,486 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:25,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 
2024-11-21T00:29:25,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:25,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:25,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:25,486 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40365 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:25,487 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:25,488 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:25,488 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/23610525da0d4d6da8429dcb4fcc7fac, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2a931d4df2df4ddfa20aec072625adf0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/29553f2d18fa4378883c30db52e86305] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=39.4 K 2024-11-21T00:29:25,488 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:25,488 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:25,489 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:25,489 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/976bd550c3be4b18b543d965a7ca618e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/a89ef10d09f449159a033eab815fc961, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3e3e31d535fe4c2b9024be66a1fb9f6a] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=37.0 K 2024-11-21T00:29:25,489 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23610525da0d4d6da8429dcb4fcc7fac, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732148962511 2024-11-21T00:29:25,489 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a931d4df2df4ddfa20aec072625adf0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1732148962727 2024-11-21T00:29:25,489 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 976bd550c3be4b18b543d965a7ca618e, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732148962511 2024-11-21T00:29:25,493 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting a89ef10d09f449159a033eab815fc961, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1732148962727 2024-11-21T00:29:25,493 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29553f2d18fa4378883c30db52e86305, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1732148963982 2024-11-21T00:29:25,495 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e3e31d535fe4c2b9024be66a1fb9f6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1732148964010 2024-11-21T00:29:25,509 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#486 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:25,509 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/e6ca5e8f555644779eb41da4fcc52b60 is 50, key is test_row_0/B:col10/1732148964909/Put/seqid=0 2024-11-21T00:29:25,509 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#487 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:25,511 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/f5b7c31cd45e4bce960a10c0da3c4b36 is 50, key is test_row_0/A:col10/1732148964909/Put/seqid=0 2024-11-21T00:29:25,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742390_1566 (size=13425) 2024-11-21T00:29:25,560 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/e6ca5e8f555644779eb41da4fcc52b60 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/e6ca5e8f555644779eb41da4fcc52b60 2024-11-21T00:29:25,580 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into e6ca5e8f555644779eb41da4fcc52b60(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:25,580 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:25,580 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=13, startTime=1732148965485; duration=0sec 2024-11-21T00:29:25,581 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:25,581 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:25,581 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:25,582 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:25,583 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/C is initiating minor compaction (all files) 2024-11-21T00:29:25,583 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/C in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
2024-11-21T00:29:25,583 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/52368d8e6d654a37a0b139126274307e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/75b9a3d5a6f8406787d8007c5d98c9d5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/630eb8b609234da784b61fe90e5a25a4] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=37.0 K 2024-11-21T00:29:25,583 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 52368d8e6d654a37a0b139126274307e, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732148962511 2024-11-21T00:29:25,583 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 75b9a3d5a6f8406787d8007c5d98c9d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1732148962727 2024-11-21T00:29:25,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742391_1567 (size=13425) 2024-11-21T00:29:25,596 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 630eb8b609234da784b61fe90e5a25a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1732148964010 2024-11-21T00:29:25,603 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/f5b7c31cd45e4bce960a10c0da3c4b36 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/f5b7c31cd45e4bce960a10c0da3c4b36 2024-11-21T00:29:25,610 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into f5b7c31cd45e4bce960a10c0da3c4b36(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:25,610 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:25,610 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=13, startTime=1732148965485; duration=0sec 2024-11-21T00:29:25,610 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:25,610 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:25,611 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-21T00:29:25,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:25,611 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:29:25,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:25,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:25,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:25,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:25,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:25,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:25,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
as already flushing 2024-11-21T00:29:25,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:25,617 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#C#compaction#488 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:25,618 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/820ed8233c6b46a0bc79954dda65f5c4 is 50, key is test_row_0/C:col10/1732148964909/Put/seqid=0 2024-11-21T00:29:25,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/787668a3f66246b3b293405af48117a8 is 50, key is test_row_0/A:col10/1732148964996/Put/seqid=0 2024-11-21T00:29:25,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742392_1568 (size=13425) 2024-11-21T00:29:25,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742393_1569 (size=12301) 2024-11-21T00:29:25,681 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/787668a3f66246b3b293405af48117a8 2024-11-21T00:29:25,682 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/820ed8233c6b46a0bc79954dda65f5c4 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/820ed8233c6b46a0bc79954dda65f5c4 2024-11-21T00:29:25,689 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/C of 21e23f12556a7b945d55cc2f3dac60b0 into 820ed8233c6b46a0bc79954dda65f5c4(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
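The flush carried out by pid=147 above (FlushRegionCallable flushing all three column families) is the region-server side of a table flush. A minimal client-side sketch of requesting the same kind of flush through the public Admin API is shown below; the cluster connection comes from whatever hbase-site.xml is on the classpath and is an assumption, not something taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Requests a flush of all column families of the table, which the master
          // turns into the FlushTableProcedure / FlushRegionProcedure pair seen in the log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }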
2024-11-21T00:29:25,689 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:25,689 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/C, priority=13, startTime=1732148965486; duration=0sec 2024-11-21T00:29:25,689 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:25,689 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:25,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/52998309678d4117bd1fe3ea3b4abaa9 is 50, key is test_row_0/B:col10/1732148964996/Put/seqid=0 2024-11-21T00:29:25,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149025706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149025715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149025716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742394_1570 (size=12301) 2024-11-21T00:29:25,726 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/52998309678d4117bd1fe3ea3b4abaa9 2024-11-21T00:29:25,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/334c88e05f094795aa5f9086363361d9 is 50, key is test_row_0/C:col10/1732148964996/Put/seqid=0 2024-11-21T00:29:25,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-21T00:29:25,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742395_1571 (size=12301) 2024-11-21T00:29:25,796 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/334c88e05f094795aa5f9086363361d9 2024-11-21T00:29:25,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/787668a3f66246b3b293405af48117a8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/787668a3f66246b3b293405af48117a8 2024-11-21T00:29:25,813 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/787668a3f66246b3b293405af48117a8, entries=150, sequenceid=490, filesize=12.0 K 2024-11-21T00:29:25,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/52998309678d4117bd1fe3ea3b4abaa9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/52998309678d4117bd1fe3ea3b4abaa9 2024-11-21T00:29:25,818 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/52998309678d4117bd1fe3ea3b4abaa9, entries=150, sequenceid=490, filesize=12.0 K 2024-11-21T00:29:25,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/334c88e05f094795aa5f9086363361d9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/334c88e05f094795aa5f9086363361d9 2024-11-21T00:29:25,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149025817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,825 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/334c88e05f094795aa5f9086363361d9, entries=150, sequenceid=490, filesize=12.0 K 2024-11-21T00:29:25,828 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 21e23f12556a7b945d55cc2f3dac60b0 in 217ms, sequenceid=490, compaction requested=false 2024-11-21T00:29:25,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:25,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
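The repeated RegionTooBusyException warnings above are thrown by HRegion.checkResources when a write arrives while the region's memstore is over its blocking limit. Below is a minimal sketch of a writer that retries its Put when that happens; the table, row, family, and qualifier names mirror the ones in the log, the value and the backoff numbers are arbitrary, and in practice the HBase client may surface the exception wrapped in a retries-exhausted exception rather than directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          while (true) {
            try {
              table.put(put);
              break;                        // write accepted
            } catch (RegionTooBusyException e) {
              if (++attempts >= 5) throw e; // give up after a few tries
              Thread.sleep(200L * attempts); // simple backoff while flushes/compactions catch up
            }
          }
        }
      }
    }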
2024-11-21T00:29:25,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-21T00:29:25,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-21T00:29:25,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-21T00:29:25,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 685 msec 2024-11-21T00:29:25,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-21T00:29:25,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:25,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:25,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:25,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:25,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:25,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:25,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:25,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 698 msec 2024-11-21T00:29:25,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/e2b2c8957678400c87c6724e99b53964 is 50, key is test_row_0/A:col10/1732148965713/Put/seqid=0 2024-11-21T00:29:25,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742396_1572 (size=17181) 2024-11-21T00:29:25,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,880 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=519 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/e2b2c8957678400c87c6724e99b53964 2024-11-21T00:29:25,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149025869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149025871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/1532f4caf70f48818612fd6ecc4b5208 is 50, key is test_row_0/B:col10/1732148965713/Put/seqid=0 2024-11-21T00:29:25,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742397_1573 (size=12301) 2024-11-21T00:29:25,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=519 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/1532f4caf70f48818612fd6ecc4b5208 2024-11-21T00:29:25,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/c09000b020e9415996591a4370690d79 is 50, key is test_row_0/C:col10/1732148965713/Put/seqid=0 2024-11-21T00:29:25,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59574 deadline: 1732149025987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:25,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:25,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59510 deadline: 1732149025987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:26,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742398_1574 (size=12301) 2024-11-21T00:29:26,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=519 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/c09000b020e9415996591a4370690d79 2024-11-21T00:29:26,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/e2b2c8957678400c87c6724e99b53964 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e2b2c8957678400c87c6724e99b53964 2024-11-21T00:29:26,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:26,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59558 deadline: 1732149026027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:26,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e2b2c8957678400c87c6724e99b53964, entries=250, sequenceid=519, filesize=16.8 K 2024-11-21T00:29:26,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/1532f4caf70f48818612fd6ecc4b5208 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/1532f4caf70f48818612fd6ecc4b5208 2024-11-21T00:29:26,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/1532f4caf70f48818612fd6ecc4b5208, entries=150, sequenceid=519, filesize=12.0 K 2024-11-21T00:29:26,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/c09000b020e9415996591a4370690d79 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/c09000b020e9415996591a4370690d79 2024-11-21T00:29:26,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/c09000b020e9415996591a4370690d79, entries=150, sequenceid=519, filesize=12.0 K 2024-11-21T00:29:26,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 21e23f12556a7b945d55cc2f3dac60b0 in 228ms, sequenceid=519, compaction requested=true 2024-11-21T00:29:26,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:26,064 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:26,065 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:26,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:26,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:26,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:26,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:26,065 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:26,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:26,065 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42907 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:26,065 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/A is initiating minor compaction (all files) 2024-11-21T00:29:26,066 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/A in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
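The minor compactions being selected here are triggered automatically by the flusher once a store accumulates enough files. A compaction can also be requested explicitly through the Admin API, as in the minimal sketch below; the choice of column family "A" simply mirrors the store being compacted in the log, and whether a minor or major compaction actually runs is still decided by the region server.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionRequestExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Ask for a compaction of a single store (here column family A) ...
          admin.compact(table, Bytes.toBytes("A"));
          // ... or a major compaction of every store in the table.
          admin.majorCompact(table);
        }
      }
    }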
2024-11-21T00:29:26,066 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/f5b7c31cd45e4bce960a10c0da3c4b36, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/787668a3f66246b3b293405af48117a8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e2b2c8957678400c87c6724e99b53964] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=41.9 K 2024-11-21T00:29:26,066 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:26,066 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): 21e23f12556a7b945d55cc2f3dac60b0/B is initiating minor compaction (all files) 2024-11-21T00:29:26,066 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 21e23f12556a7b945d55cc2f3dac60b0/B in TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:26,066 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/e6ca5e8f555644779eb41da4fcc52b60, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/52998309678d4117bd1fe3ea3b4abaa9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/1532f4caf70f48818612fd6ecc4b5208] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp, totalSize=37.1 K 2024-11-21T00:29:26,066 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5b7c31cd45e4bce960a10c0da3c4b36, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1732148964010 2024-11-21T00:29:26,067 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e6ca5e8f555644779eb41da4fcc52b60, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1732148964010 2024-11-21T00:29:26,067 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 787668a3f66246b3b293405af48117a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732148964943 2024-11-21T00:29:26,067 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 52998309678d4117bd1fe3ea3b4abaa9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732148964943 2024-11-21T00:29:26,067 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting e2b2c8957678400c87c6724e99b53964, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=519, earliestPutTs=1732148965704 2024-11-21T00:29:26,068 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 1532f4caf70f48818612fd6ecc4b5208, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=519, earliestPutTs=1732148965713 2024-11-21T00:29:26,097 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#A#compaction#495 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:26,098 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/8aff3519c81045629381c279a032f08d is 50, key is test_row_0/A:col10/1732148965713/Put/seqid=0 2024-11-21T00:29:26,112 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21e23f12556a7b945d55cc2f3dac60b0#B#compaction#496 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:26,112 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/a8fa7ae3572c4ff3979f9cec2bbd2ef0 is 50, key is test_row_0/B:col10/1732148965713/Put/seqid=0 2024-11-21T00:29:26,113 DEBUG [Thread-2090 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x54e8a98a to 127.0.0.1:64241 2024-11-21T00:29:26,113 DEBUG [Thread-2090 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,115 DEBUG [Thread-2084 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10bda459 to 127.0.0.1:64241 2024-11-21T00:29:26,115 DEBUG [Thread-2084 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,117 DEBUG [Thread-2086 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0657e1bf to 127.0.0.1:64241 2024-11-21T00:29:26,117 DEBUG [Thread-2086 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,121 DEBUG [Thread-2088 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dee2855 to 127.0.0.1:64241 2024-11-21T00:29:26,121 DEBUG [Thread-2088 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,132 DEBUG [Thread-2092 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x677030bd to 127.0.0.1:64241 2024-11-21T00:29:26,132 DEBUG [Thread-2092 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,155 DEBUG [Thread-2075 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d125972 to 127.0.0.1:64241 2024-11-21T00:29:26,155 DEBUG [Thread-2075 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742399_1575 (size=13527) 2024-11-21T00:29:26,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39227 is added to blk_1073742400_1576 (size=13527) 2024-11-21T00:29:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:26,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-21T00:29:26,166 DEBUG [Thread-2073 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x315a23ef to 127.0.0.1:64241 2024-11-21T00:29:26,166 DEBUG [Thread-2073 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:26,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:26,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:26,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:26,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:26,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:26,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/8e5b260654d3416a9dfeefa62edcc05b is 50, key is test_row_0/A:col10/1732148966165/Put/seqid=0 2024-11-21T00:29:26,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742401_1577 (size=12301) 2024-11-21T00:29:26,194 DEBUG [Thread-2077 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x134bfe32 to 127.0.0.1:64241 2024-11-21T00:29:26,194 DEBUG [Thread-2079 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17b55f2f to 127.0.0.1:64241 2024-11-21T00:29:26,194 DEBUG [Thread-2077 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,194 DEBUG [Thread-2079 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-21T00:29:26,268 INFO [Thread-2083 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-21T00:29:26,345 DEBUG [Thread-2081 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x646ca555 to 127.0.0.1:64241 2024-11-21T00:29:26,345 DEBUG [Thread-2081 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 129 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 120 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1283 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3849 rows 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1268 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3804 rows 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1277 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3831 rows 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1269 2024-11-21T00:29:26,345 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3807 rows 2024-11-21T00:29:26,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1271 2024-11-21T00:29:26,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3807 rows 2024-11-21T00:29:26,346 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-21T00:29:26,346 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x345fa4f7 to 127.0.0.1:64241 2024-11-21T00:29:26,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:26,347 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-21T00:29:26,348 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-21T00:29:26,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:26,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-21T00:29:26,356 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148966355"}]},"ts":"1732148966355"} 2024-11-21T00:29:26,357 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-21T00:29:26,373 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-21T00:29:26,374 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-21T00:29:26,375 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=21e23f12556a7b945d55cc2f3dac60b0, UNASSIGN}] 2024-11-21T00:29:26,376 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=21e23f12556a7b945d55cc2f3dac60b0, UNASSIGN 2024-11-21T00:29:26,376 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=21e23f12556a7b945d55cc2f3dac60b0, regionState=CLOSING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:26,378 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T00:29:26,378 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; CloseRegionProcedure 21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:29:26,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-21T00:29:26,530 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:26,530 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(124): Close 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:26,531 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-21T00:29:26,531 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1681): Closing 21e23f12556a7b945d55cc2f3dac60b0, disabling compactions & flushes 2024-11-21T00:29:26,531 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1942): waiting for 2 compactions & cache flush to complete for region TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
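The DisableTableProcedure, CloseTableRegionsProcedure, and CloseRegionProcedure chain above is what the master runs when a client disables the table. A minimal client-side sketch of the call that starts it is shown below; deleting the table afterwards is a typical test teardown step but is only an assumption here, so it is left commented out.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table); // drives the DisableTableProcedure / region close seen in the log
          }
          // A test teardown would usually drop the table next (hypothetical here):
          // admin.deleteTable(table);
        }
      }
    }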
2024-11-21T00:29:26,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/8e5b260654d3416a9dfeefa62edcc05b 2024-11-21T00:29:26,587 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/a8fa7ae3572c4ff3979f9cec2bbd2ef0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/a8fa7ae3572c4ff3979f9cec2bbd2ef0 2024-11-21T00:29:26,587 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/8aff3519c81045629381c279a032f08d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/8aff3519c81045629381c279a032f08d 2024-11-21T00:29:26,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/731e4aa74a2d4f658e5d3d2074002837 is 50, key is test_row_0/B:col10/1732148966165/Put/seqid=0 2024-11-21T00:29:26,596 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/A of 21e23f12556a7b945d55cc2f3dac60b0 into 8aff3519c81045629381c279a032f08d(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:26,596 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:26,596 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 21e23f12556a7b945d55cc2f3dac60b0/B of 21e23f12556a7b945d55cc2f3dac60b0 into a8fa7ae3572c4ff3979f9cec2bbd2ef0(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
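The "Over memstore limit=512.0 K" value behind the earlier RegionTooBusyException warnings is the region's blocking memstore size, which is normally the per-region flush size multiplied by the block multiplier; this test presumably runs with a much smaller flush size than the defaults. The sketch below only reads those two settings and prints the derived limit; the property names are the standard HBase keys, and the defaults shown are the usual ones, not necessarily what this test configures.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Region-level flush threshold; the blocking limit that produces
        // RegionTooBusyException is roughly flush size * block multiplier.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size ~= " + (flushSize * multiplier) + " bytes");
      }
    }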
2024-11-21T00:29:26,596 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/A, priority=13, startTime=1732148966064; duration=0sec 2024-11-21T00:29:26,596 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:26,596 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0., storeName=21e23f12556a7b945d55cc2f3dac60b0/B, priority=13, startTime=1732148966065; duration=0sec 2024-11-21T00:29:26,596 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:26,596 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:26,596 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:26,596 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:26,596 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:26,596 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
because compaction request was cancelled 2024-11-21T00:29:26,596 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:26,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742402_1578 (size=12301) 2024-11-21T00:29:26,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-21T00:29:26,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-21T00:29:27,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/731e4aa74a2d4f658e5d3d2074002837 2024-11-21T00:29:27,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/45009858745f4545b4643c758e1900a7 is 50, key is test_row_0/C:col10/1732148966165/Put/seqid=0 2024-11-21T00:29:27,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742403_1579 (size=12301) 2024-11-21T00:29:27,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/45009858745f4545b4643c758e1900a7 2024-11-21T00:29:27,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/8e5b260654d3416a9dfeefa62edcc05b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/8e5b260654d3416a9dfeefa62edcc05b 2024-11-21T00:29:27,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/8e5b260654d3416a9dfeefa62edcc05b, entries=150, sequenceid=530, filesize=12.0 K 2024-11-21T00:29:27,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/731e4aa74a2d4f658e5d3d2074002837 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/731e4aa74a2d4f658e5d3d2074002837 2024-11-21T00:29:27,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/731e4aa74a2d4f658e5d3d2074002837, entries=150, sequenceid=530, filesize=12.0 K 2024-11-21T00:29:27,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/45009858745f4545b4643c758e1900a7 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/45009858745f4545b4643c758e1900a7 2024-11-21T00:29:27,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/45009858745f4545b4643c758e1900a7, entries=150, sequenceid=530, filesize=12.0 K 2024-11-21T00:29:27,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=20.13 KB/20610 for 21e23f12556a7b945d55cc2f3dac60b0 in 986ms, sequenceid=530, compaction requested=true 2024-11-21T00:29:27,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:27,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:27,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:27,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:27,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:29:27,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 21e23f12556a7b945d55cc2f3dac60b0:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:27,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-21T00:29:27,153 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:27,153 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:27,153 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
after waiting 0 ms 2024-11-21T00:29:27,153 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:27,153 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2837): Flushing 21e23f12556a7b945d55cc2f3dac60b0 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-21T00:29:27,153 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=A 2024-11-21T00:29:27,153 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:27,153 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=B 2024-11-21T00:29:27,153 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:27,153 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 21e23f12556a7b945d55cc2f3dac60b0, store=C 2024-11-21T00:29:27,153 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:27,153 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. because compaction request was cancelled 2024-11-21T00:29:27,154 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:C 2024-11-21T00:29:27,154 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. because compaction request was cancelled 2024-11-21T00:29:27,154 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:A 2024-11-21T00:29:27,154 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 
because compaction request was cancelled 2024-11-21T00:29:27,154 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21e23f12556a7b945d55cc2f3dac60b0:B 2024-11-21T00:29:27,170 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/1b1e0a77162d4a2bb5a28f00b2548cfd is 50, key is test_row_0/A:col10/1732148966193/Put/seqid=0 2024-11-21T00:29:27,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742404_1580 (size=12301) 2024-11-21T00:29:27,213 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=538 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/1b1e0a77162d4a2bb5a28f00b2548cfd 2024-11-21T00:29:27,246 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/678114d8ed18429aa96c83d444c9eb2d is 50, key is test_row_0/B:col10/1732148966193/Put/seqid=0 2024-11-21T00:29:27,298 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T00:29:27,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742405_1581 (size=12301) 2024-11-21T00:29:27,317 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=538 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/678114d8ed18429aa96c83d444c9eb2d 2024-11-21T00:29:27,367 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/edcfc884ce3e457b87cb526337411ec6 is 50, key is test_row_0/C:col10/1732148966193/Put/seqid=0 2024-11-21T00:29:27,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742406_1582 (size=12301) 2024-11-21T00:29:27,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-21T00:29:27,807 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=538 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/edcfc884ce3e457b87cb526337411ec6 2024-11-21T00:29:27,841 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/A/1b1e0a77162d4a2bb5a28f00b2548cfd as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/1b1e0a77162d4a2bb5a28f00b2548cfd 2024-11-21T00:29:27,869 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/1b1e0a77162d4a2bb5a28f00b2548cfd, entries=150, sequenceid=538, filesize=12.0 K 2024-11-21T00:29:27,870 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/B/678114d8ed18429aa96c83d444c9eb2d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/678114d8ed18429aa96c83d444c9eb2d 2024-11-21T00:29:27,899 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/678114d8ed18429aa96c83d444c9eb2d, entries=150, sequenceid=538, filesize=12.0 K 2024-11-21T00:29:27,911 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/.tmp/C/edcfc884ce3e457b87cb526337411ec6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/edcfc884ce3e457b87cb526337411ec6 2024-11-21T00:29:27,924 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/edcfc884ce3e457b87cb526337411ec6, entries=150, sequenceid=538, filesize=12.0 K 2024-11-21T00:29:27,928 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 21e23f12556a7b945d55cc2f3dac60b0 in 774ms, sequenceid=538, compaction requested=true 2024-11-21T00:29:27,935 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d4d77d46e5714bf091f1536ec2c23fc7, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/a3938f1e96fd418f8cb54166a173da65, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2f0924278f7e4536add1c40be8a393fa, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/329cc9126f0346c38e762d1a1b1bcf4b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d26815f4c57b46a4aa44b3f79e96216a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/86576a87409b41adb5dbbf0251dab9e3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/c01cf1435c2d48e6bdece9fcdb7abe29, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/aeeaeb10b0c14435a54431fc01b0af07, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/1313266f48894ae89e8388edad4cbbda, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/73ac0faa20724aeba1837171771d4ff4, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/34c6583b3f554379a7b9d17ce3577540, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/279e9dbaae0c4cba84762baca3505a32, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/9b931e20b52e41b88cc0e77c8e73b5a6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7573134e9ed048228b42574ea3ff4906, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/4b0a08646e324382840256322e291e93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/66aed2d34f2f4ec5a99ceff074a4f979, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/804940a9ff0c43d4ad527ee5705c01c6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/3e939841ce4f41ec95dbe904636b95e6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ada733c79b3d4cbcab30f0765072b244, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0efde73accd14a678c02f092bfbc9d93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/786c8fa09d4e40e68d81950c88f2cc82, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/058536168d9b48d8b6f49aa0c9d996b8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e5071d75e4a84bd8aabde567e292af02, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/de46ac1f17024742bfe38df7440f7239, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ee094dea5a10487bbd618a96bb81bc3d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0b7774d77389486a994bebf557e0353b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/dea0dd6466c4454a94193a1d2579b887, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/5f069001498d48ea8f4b9042cf1e77d8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7c591e9fc7b347288c12625cd4f47147, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/77838d6eaf0b48cbae12810f8fcdd828, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/23610525da0d4d6da8429dcb4fcc7fac, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2a931d4df2df4ddfa20aec072625adf0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/29553f2d18fa4378883c30db52e86305, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/f5b7c31cd45e4bce960a10c0da3c4b36, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/787668a3f66246b3b293405af48117a8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e2b2c8957678400c87c6724e99b53964] to archive 2024-11-21T00:29:27,946 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:29:27,967 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d4d77d46e5714bf091f1536ec2c23fc7 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d4d77d46e5714bf091f1536ec2c23fc7 2024-11-21T00:29:27,983 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/a3938f1e96fd418f8cb54166a173da65 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/a3938f1e96fd418f8cb54166a173da65 2024-11-21T00:29:27,991 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2f0924278f7e4536add1c40be8a393fa to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2f0924278f7e4536add1c40be8a393fa 2024-11-21T00:29:27,999 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/329cc9126f0346c38e762d1a1b1bcf4b to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/329cc9126f0346c38e762d1a1b1bcf4b 2024-11-21T00:29:28,007 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d26815f4c57b46a4aa44b3f79e96216a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/d26815f4c57b46a4aa44b3f79e96216a 2024-11-21T00:29:28,019 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/86576a87409b41adb5dbbf0251dab9e3 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/86576a87409b41adb5dbbf0251dab9e3 2024-11-21T00:29:28,023 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/c01cf1435c2d48e6bdece9fcdb7abe29 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/c01cf1435c2d48e6bdece9fcdb7abe29 2024-11-21T00:29:28,029 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/aeeaeb10b0c14435a54431fc01b0af07 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/aeeaeb10b0c14435a54431fc01b0af07 2024-11-21T00:29:28,031 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/1313266f48894ae89e8388edad4cbbda to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/1313266f48894ae89e8388edad4cbbda 2024-11-21T00:29:28,033 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/73ac0faa20724aeba1837171771d4ff4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/73ac0faa20724aeba1837171771d4ff4 2024-11-21T00:29:28,036 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/34c6583b3f554379a7b9d17ce3577540 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/34c6583b3f554379a7b9d17ce3577540 2024-11-21T00:29:28,040 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/279e9dbaae0c4cba84762baca3505a32 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/279e9dbaae0c4cba84762baca3505a32 2024-11-21T00:29:28,042 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/9b931e20b52e41b88cc0e77c8e73b5a6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/9b931e20b52e41b88cc0e77c8e73b5a6 2024-11-21T00:29:28,044 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7573134e9ed048228b42574ea3ff4906 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7573134e9ed048228b42574ea3ff4906 2024-11-21T00:29:28,047 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/4b0a08646e324382840256322e291e93 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/4b0a08646e324382840256322e291e93 2024-11-21T00:29:28,049 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/66aed2d34f2f4ec5a99ceff074a4f979 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/66aed2d34f2f4ec5a99ceff074a4f979 2024-11-21T00:29:28,050 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/804940a9ff0c43d4ad527ee5705c01c6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/804940a9ff0c43d4ad527ee5705c01c6 2024-11-21T00:29:28,052 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/3e939841ce4f41ec95dbe904636b95e6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/3e939841ce4f41ec95dbe904636b95e6 2024-11-21T00:29:28,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ada733c79b3d4cbcab30f0765072b244 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ada733c79b3d4cbcab30f0765072b244 2024-11-21T00:29:28,054 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0efde73accd14a678c02f092bfbc9d93 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0efde73accd14a678c02f092bfbc9d93 2024-11-21T00:29:28,056 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/786c8fa09d4e40e68d81950c88f2cc82 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/786c8fa09d4e40e68d81950c88f2cc82 2024-11-21T00:29:28,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/058536168d9b48d8b6f49aa0c9d996b8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/058536168d9b48d8b6f49aa0c9d996b8 2024-11-21T00:29:28,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e5071d75e4a84bd8aabde567e292af02 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e5071d75e4a84bd8aabde567e292af02 2024-11-21T00:29:28,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/de46ac1f17024742bfe38df7440f7239 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/de46ac1f17024742bfe38df7440f7239 2024-11-21T00:29:28,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ee094dea5a10487bbd618a96bb81bc3d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/ee094dea5a10487bbd618a96bb81bc3d 2024-11-21T00:29:28,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0b7774d77389486a994bebf557e0353b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/0b7774d77389486a994bebf557e0353b 2024-11-21T00:29:28,065 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/dea0dd6466c4454a94193a1d2579b887 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/dea0dd6466c4454a94193a1d2579b887 2024-11-21T00:29:28,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/5f069001498d48ea8f4b9042cf1e77d8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/5f069001498d48ea8f4b9042cf1e77d8 2024-11-21T00:29:28,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7c591e9fc7b347288c12625cd4f47147 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/7c591e9fc7b347288c12625cd4f47147 2024-11-21T00:29:28,095 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/77838d6eaf0b48cbae12810f8fcdd828 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/77838d6eaf0b48cbae12810f8fcdd828 2024-11-21T00:29:28,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/23610525da0d4d6da8429dcb4fcc7fac to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/23610525da0d4d6da8429dcb4fcc7fac 2024-11-21T00:29:28,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2a931d4df2df4ddfa20aec072625adf0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/2a931d4df2df4ddfa20aec072625adf0 2024-11-21T00:29:28,112 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/29553f2d18fa4378883c30db52e86305 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/29553f2d18fa4378883c30db52e86305 2024-11-21T00:29:28,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/f5b7c31cd45e4bce960a10c0da3c4b36 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/f5b7c31cd45e4bce960a10c0da3c4b36 2024-11-21T00:29:28,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/787668a3f66246b3b293405af48117a8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/787668a3f66246b3b293405af48117a8 2024-11-21T00:29:28,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e2b2c8957678400c87c6724e99b53964 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/e2b2c8957678400c87c6724e99b53964 2024-11-21T00:29:28,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3fd9f3aa8598476ab9886c042f340a64, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/19ad8ee9ad274a0da4a5820cb105bf42, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5d9851ce9f5a48dfa16b8106d3fec314, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/568b4a34b69749e1a211e0de434b5560, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/add482e2805f4221a4d85051181df494, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/9eba807bb4474b0983734ccacc765489, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/48106425f6ef410ea3dba6cb27d326ef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/fece8372703f4446b0390a0276966d0f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/ca8039c595bd4c0598131309f88787fd, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ff2b1fd15a34431a42e39a4f67ffb19, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/d778c371c7264c94ab32ff47693815f2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/c85d50e1931b48afb7d5ebb7aa3a7de9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0d24dbf58b4f4cfda7b78d429591a930, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0cabcc9e5d3b439bac9c0b3f9580e05c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/dd1d876abcd94447ae622ecd79e483d0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/7fc990ac0e884a8ca47cb8b14ad397e3, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/84f2af6fa127446c87f84e5abeebb8d3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5ef94a752b424f2d9055b0c20e747e28, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/76ea504f33f14abf80569322014cd2df, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8c2002237c614a70a6dbce81aa443ee9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8b9d5280c8d34bd28bbe7119b88e1891, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8f607a49378c45978535a539bfeb0d83, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ef90d41881c4b27834bcb5b5df3c1e2, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/488b5817991647debd43ced672cc3870, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/cb4723b133c9456f88a728b7cafbb70d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/83ca168d0e484a1a99ddeb206ff7cbec, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/31c206083e1a4cc39c0702c94580c993, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/786f6482250f4887a4fae9f7d8a0d0ce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/b934d0e9c0a4441da62c666539060469, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/976bd550c3be4b18b543d965a7ca618e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/379c247731cb4a7883c4b9492f845112, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/a89ef10d09f449159a033eab815fc961, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/e6ca5e8f555644779eb41da4fcc52b60, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3e3e31d535fe4c2b9024be66a1fb9f6a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/52998309678d4117bd1fe3ea3b4abaa9, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/1532f4caf70f48818612fd6ecc4b5208] to archive 2024-11-21T00:29:28,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T00:29:28,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3fd9f3aa8598476ab9886c042f340a64 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3fd9f3aa8598476ab9886c042f340a64 2024-11-21T00:29:28,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/19ad8ee9ad274a0da4a5820cb105bf42 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/19ad8ee9ad274a0da4a5820cb105bf42 2024-11-21T00:29:28,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5d9851ce9f5a48dfa16b8106d3fec314 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5d9851ce9f5a48dfa16b8106d3fec314 2024-11-21T00:29:28,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/568b4a34b69749e1a211e0de434b5560 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/568b4a34b69749e1a211e0de434b5560 2024-11-21T00:29:28,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/add482e2805f4221a4d85051181df494 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/add482e2805f4221a4d85051181df494 2024-11-21T00:29:28,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/9eba807bb4474b0983734ccacc765489 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/9eba807bb4474b0983734ccacc765489 2024-11-21T00:29:28,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/48106425f6ef410ea3dba6cb27d326ef to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/48106425f6ef410ea3dba6cb27d326ef 2024-11-21T00:29:28,173 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/fece8372703f4446b0390a0276966d0f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/fece8372703f4446b0390a0276966d0f 2024-11-21T00:29:28,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/ca8039c595bd4c0598131309f88787fd to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/ca8039c595bd4c0598131309f88787fd 2024-11-21T00:29:28,178 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ff2b1fd15a34431a42e39a4f67ffb19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ff2b1fd15a34431a42e39a4f67ffb19 2024-11-21T00:29:28,182 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/d778c371c7264c94ab32ff47693815f2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/d778c371c7264c94ab32ff47693815f2 2024-11-21T00:29:28,183 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/c85d50e1931b48afb7d5ebb7aa3a7de9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/c85d50e1931b48afb7d5ebb7aa3a7de9 2024-11-21T00:29:28,186 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0d24dbf58b4f4cfda7b78d429591a930 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0d24dbf58b4f4cfda7b78d429591a930 2024-11-21T00:29:28,188 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0cabcc9e5d3b439bac9c0b3f9580e05c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/0cabcc9e5d3b439bac9c0b3f9580e05c 2024-11-21T00:29:28,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/dd1d876abcd94447ae622ecd79e483d0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/dd1d876abcd94447ae622ecd79e483d0 2024-11-21T00:29:28,191 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/7fc990ac0e884a8ca47cb8b14ad397e3 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/7fc990ac0e884a8ca47cb8b14ad397e3 2024-11-21T00:29:28,192 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/84f2af6fa127446c87f84e5abeebb8d3 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/84f2af6fa127446c87f84e5abeebb8d3 2024-11-21T00:29:28,193 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5ef94a752b424f2d9055b0c20e747e28 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/5ef94a752b424f2d9055b0c20e747e28 2024-11-21T00:29:28,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/76ea504f33f14abf80569322014cd2df to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/76ea504f33f14abf80569322014cd2df 2024-11-21T00:29:28,205 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8c2002237c614a70a6dbce81aa443ee9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8c2002237c614a70a6dbce81aa443ee9 2024-11-21T00:29:28,210 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8b9d5280c8d34bd28bbe7119b88e1891 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8b9d5280c8d34bd28bbe7119b88e1891 2024-11-21T00:29:28,215 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8f607a49378c45978535a539bfeb0d83 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/8f607a49378c45978535a539bfeb0d83 2024-11-21T00:29:28,223 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ef90d41881c4b27834bcb5b5df3c1e2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/4ef90d41881c4b27834bcb5b5df3c1e2 2024-11-21T00:29:28,228 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/488b5817991647debd43ced672cc3870 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/488b5817991647debd43ced672cc3870 2024-11-21T00:29:28,230 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/cb4723b133c9456f88a728b7cafbb70d to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/cb4723b133c9456f88a728b7cafbb70d 2024-11-21T00:29:28,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/83ca168d0e484a1a99ddeb206ff7cbec to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/83ca168d0e484a1a99ddeb206ff7cbec 2024-11-21T00:29:28,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/31c206083e1a4cc39c0702c94580c993 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/31c206083e1a4cc39c0702c94580c993 2024-11-21T00:29:28,256 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/786f6482250f4887a4fae9f7d8a0d0ce to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/786f6482250f4887a4fae9f7d8a0d0ce 2024-11-21T00:29:28,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/b934d0e9c0a4441da62c666539060469 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/b934d0e9c0a4441da62c666539060469 2024-11-21T00:29:28,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/976bd550c3be4b18b543d965a7ca618e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/976bd550c3be4b18b543d965a7ca618e 2024-11-21T00:29:28,271 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/379c247731cb4a7883c4b9492f845112 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/379c247731cb4a7883c4b9492f845112 2024-11-21T00:29:28,277 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/a89ef10d09f449159a033eab815fc961 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/a89ef10d09f449159a033eab815fc961 2024-11-21T00:29:28,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/e6ca5e8f555644779eb41da4fcc52b60 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/e6ca5e8f555644779eb41da4fcc52b60 2024-11-21T00:29:28,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3e3e31d535fe4c2b9024be66a1fb9f6a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/3e3e31d535fe4c2b9024be66a1fb9f6a 2024-11-21T00:29:28,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/52998309678d4117bd1fe3ea3b4abaa9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/52998309678d4117bd1fe3ea3b4abaa9 2024-11-21T00:29:28,289 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/1532f4caf70f48818612fd6ecc4b5208 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/1532f4caf70f48818612fd6ecc4b5208 2024-11-21T00:29:28,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/e9e59320101341debaedb1e5d433d419, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/23f5e6db1cf844ad870a6a1029a95cad, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/de1abaca0bc942b78b49ff399137e559, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/17e19f15138f4a76a6674d14b8c585c7, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe4e92042b8d489188574bbce756633e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/c02164366e0b4bb0b25dd31b59c40432, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b34e3648c3a742bc80eca51d8a2d847c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/69bc632db5374be8b92acc4ef9c73835, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe95cb9375f248669d62e2559d5c8dd0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/7d35f64532f6451ba229efd5dd9af5d7, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/aad32fa6b8b54591957e203b1ef56daa, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/01ee0bf5994b4da594726e968c3532ac, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b2415a7450d544d9a10504e69349b3fe, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1a2ed17b187f4258984fb6bdb755a6a5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72e4dbe5e8144899aa3fcfb6bfa67a7d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72d5cd8e3fa54169b94c247034a64b35, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/d75f1e89906845858355a8c653ac8c4f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b0bcd5ce21cc4650945fd96b448f57c9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/3b6b5a00f0384e89ae78fcc23c044565, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fb927b93660243ddb187fcf492768025, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/26b791f86dc943ebab60e5e2f82fbca0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1e8d597d129143aca29bf7524ae684f7, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/f0ad2fd609464b5fbf53183481ec0c83, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/80120b7f08a844fbab937cd646f71fce, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ce5e1be7c7ec4087babcc4491c8fb073, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ea5e2222abf24fa0a54e086ff8d4b2b8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/79cfed9c0ea447cca33e2b47a4b8ac1e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/34dded2a73d644f693d66e6710672adb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/81c627fd482a4f378c3ace7df72443d3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/52368d8e6d654a37a0b139126274307e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/4ff0120f1493467ca04d8eb04c4e58bf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/75b9a3d5a6f8406787d8007c5d98c9d5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/630eb8b609234da784b61fe90e5a25a4] to archive 2024-11-21T00:29:28,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-21T00:29:28,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/e9e59320101341debaedb1e5d433d419 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/e9e59320101341debaedb1e5d433d419 2024-11-21T00:29:28,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/23f5e6db1cf844ad870a6a1029a95cad to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/23f5e6db1cf844ad870a6a1029a95cad 2024-11-21T00:29:28,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/de1abaca0bc942b78b49ff399137e559 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/de1abaca0bc942b78b49ff399137e559 2024-11-21T00:29:28,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/17e19f15138f4a76a6674d14b8c585c7 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/17e19f15138f4a76a6674d14b8c585c7 2024-11-21T00:29:28,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe4e92042b8d489188574bbce756633e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe4e92042b8d489188574bbce756633e 2024-11-21T00:29:28,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/c02164366e0b4bb0b25dd31b59c40432 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/c02164366e0b4bb0b25dd31b59c40432 2024-11-21T00:29:28,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b34e3648c3a742bc80eca51d8a2d847c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b34e3648c3a742bc80eca51d8a2d847c 2024-11-21T00:29:28,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/69bc632db5374be8b92acc4ef9c73835 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/69bc632db5374be8b92acc4ef9c73835 2024-11-21T00:29:28,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe95cb9375f248669d62e2559d5c8dd0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fe95cb9375f248669d62e2559d5c8dd0 2024-11-21T00:29:28,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/7d35f64532f6451ba229efd5dd9af5d7 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/7d35f64532f6451ba229efd5dd9af5d7 2024-11-21T00:29:28,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/aad32fa6b8b54591957e203b1ef56daa to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/aad32fa6b8b54591957e203b1ef56daa 2024-11-21T00:29:28,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/01ee0bf5994b4da594726e968c3532ac to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/01ee0bf5994b4da594726e968c3532ac 2024-11-21T00:29:28,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b2415a7450d544d9a10504e69349b3fe to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b2415a7450d544d9a10504e69349b3fe 2024-11-21T00:29:28,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1a2ed17b187f4258984fb6bdb755a6a5 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1a2ed17b187f4258984fb6bdb755a6a5 2024-11-21T00:29:28,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72e4dbe5e8144899aa3fcfb6bfa67a7d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72e4dbe5e8144899aa3fcfb6bfa67a7d 2024-11-21T00:29:28,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72d5cd8e3fa54169b94c247034a64b35 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/72d5cd8e3fa54169b94c247034a64b35 2024-11-21T00:29:28,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/d75f1e89906845858355a8c653ac8c4f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/d75f1e89906845858355a8c653ac8c4f 2024-11-21T00:29:28,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b0bcd5ce21cc4650945fd96b448f57c9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/b0bcd5ce21cc4650945fd96b448f57c9 2024-11-21T00:29:28,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/3b6b5a00f0384e89ae78fcc23c044565 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/3b6b5a00f0384e89ae78fcc23c044565 2024-11-21T00:29:28,428 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fb927b93660243ddb187fcf492768025 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/fb927b93660243ddb187fcf492768025 2024-11-21T00:29:28,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/26b791f86dc943ebab60e5e2f82fbca0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/26b791f86dc943ebab60e5e2f82fbca0 2024-11-21T00:29:28,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1e8d597d129143aca29bf7524ae684f7 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/1e8d597d129143aca29bf7524ae684f7 2024-11-21T00:29:28,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/f0ad2fd609464b5fbf53183481ec0c83 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/f0ad2fd609464b5fbf53183481ec0c83 2024-11-21T00:29:28,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-21T00:29:28,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/80120b7f08a844fbab937cd646f71fce to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/80120b7f08a844fbab937cd646f71fce 2024-11-21T00:29:28,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ce5e1be7c7ec4087babcc4491c8fb073 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ce5e1be7c7ec4087babcc4491c8fb073 2024-11-21T00:29:28,491 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ea5e2222abf24fa0a54e086ff8d4b2b8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/ea5e2222abf24fa0a54e086ff8d4b2b8 2024-11-21T00:29:28,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/79cfed9c0ea447cca33e2b47a4b8ac1e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/79cfed9c0ea447cca33e2b47a4b8ac1e 2024-11-21T00:29:28,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/34dded2a73d644f693d66e6710672adb to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/34dded2a73d644f693d66e6710672adb 2024-11-21T00:29:28,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/81c627fd482a4f378c3ace7df72443d3 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/81c627fd482a4f378c3ace7df72443d3 2024-11-21T00:29:28,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/52368d8e6d654a37a0b139126274307e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/52368d8e6d654a37a0b139126274307e 2024-11-21T00:29:28,531 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/4ff0120f1493467ca04d8eb04c4e58bf to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/4ff0120f1493467ca04d8eb04c4e58bf 2024-11-21T00:29:28,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/75b9a3d5a6f8406787d8007c5d98c9d5 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/75b9a3d5a6f8406787d8007c5d98c9d5 2024-11-21T00:29:28,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/630eb8b609234da784b61fe90e5a25a4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/630eb8b609234da784b61fe90e5a25a4 2024-11-21T00:29:28,555 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/recovered.edits/541.seqid, newMaxSeqId=541, maxSeqId=1 2024-11-21T00:29:28,556 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0. 2024-11-21T00:29:28,556 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1635): Region close journal for 21e23f12556a7b945d55cc2f3dac60b0: 2024-11-21T00:29:28,558 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(170): Closed 21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:28,558 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=21e23f12556a7b945d55cc2f3dac60b0, regionState=CLOSED 2024-11-21T00:29:28,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-21T00:29:28,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseRegionProcedure 21e23f12556a7b945d55cc2f3dac60b0, server=0e7930017ff8,37961,1732148819586 in 2.1820 sec 2024-11-21T00:29:28,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-11-21T00:29:28,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=21e23f12556a7b945d55cc2f3dac60b0, UNASSIGN in 2.1880 sec 2024-11-21T00:29:28,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-21T00:29:28,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.2000 sec 2024-11-21T00:29:28,587 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148968587"}]},"ts":"1732148968587"} 2024-11-21T00:29:28,588 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 
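[Editor's note, not part of the log] The HFileArchiver entries above all follow one pattern: each compacted store file is moved from its location under data/ to a mirrored location under archive/, keeping the namespace/table/region/column-family layout intact. The helper below is a hypothetical illustration of that path mapping only; it is not HBase's HFileArchiver code, and the root directory and file name are simply copied from the log entries above.

    /** Illustrative sketch of the data -> archive path rewrite seen in the log; hypothetical, not HBase code. */
    public final class ArchivePathSketch {

        // Rewrites <root>/data/<ns>/<table>/<region>/<cf>/<hfile>
        // into     <root>/archive/data/<ns>/<table>/<region>/<cf>/<hfile>,
        // leaving the filesystem authority (e.g. hdfs://localhost:38105) untouched.
        static String toArchivePath(String rootDir, String storeFilePath) {
            String dataPrefix = rootDir + "/data/";
            if (!storeFilePath.startsWith(dataPrefix)) {
                throw new IllegalArgumentException("Not under the data directory: " + storeFilePath);
            }
            return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f";
            String src = root + "/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/1532f4caf70f48818612fd6ecc4b5208";
            // Prints the same archive destination that the corresponding log entry reports.
            System.out.println(toArchivePath(root, src));
        }
    }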
2024-11-21T00:29:28,655 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-21T00:29:28,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.3090 sec 2024-11-21T00:29:28,968 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-21T00:29:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-21T00:29:30,464 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-21T00:29:30,465 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-21T00:29:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:30,466 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=152, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-21T00:29:30,467 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=152, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:30,467 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:30,469 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/recovered.edits] 2024-11-21T00:29:30,471 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/1b1e0a77162d4a2bb5a28f00b2548cfd to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/1b1e0a77162d4a2bb5a28f00b2548cfd 2024-11-21T00:29:30,472 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/8aff3519c81045629381c279a032f08d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/8aff3519c81045629381c279a032f08d 2024-11-21T00:29:30,472 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/8e5b260654d3416a9dfeefa62edcc05b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/A/8e5b260654d3416a9dfeefa62edcc05b 2024-11-21T00:29:30,474 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/678114d8ed18429aa96c83d444c9eb2d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/678114d8ed18429aa96c83d444c9eb2d 2024-11-21T00:29:30,475 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/731e4aa74a2d4f658e5d3d2074002837 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/731e4aa74a2d4f658e5d3d2074002837 2024-11-21T00:29:30,477 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/a8fa7ae3572c4ff3979f9cec2bbd2ef0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/B/a8fa7ae3572c4ff3979f9cec2bbd2ef0 2024-11-21T00:29:30,479 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/334c88e05f094795aa5f9086363361d9 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/334c88e05f094795aa5f9086363361d9 2024-11-21T00:29:30,480 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/45009858745f4545b4643c758e1900a7 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/45009858745f4545b4643c758e1900a7 2024-11-21T00:29:30,481 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/820ed8233c6b46a0bc79954dda65f5c4 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/820ed8233c6b46a0bc79954dda65f5c4 2024-11-21T00:29:30,483 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/c09000b020e9415996591a4370690d79 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/c09000b020e9415996591a4370690d79 2024-11-21T00:29:30,484 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/edcfc884ce3e457b87cb526337411ec6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/C/edcfc884ce3e457b87cb526337411ec6 2024-11-21T00:29:30,486 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/recovered.edits/541.seqid to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0/recovered.edits/541.seqid 2024-11-21T00:29:30,486 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/21e23f12556a7b945d55cc2f3dac60b0 2024-11-21T00:29:30,486 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-21T00:29:30,488 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=152, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:30,490 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-21T00:29:30,491 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-21T00:29:30,493 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=152, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:30,493 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-21T00:29:30,493 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732148970493"}]},"ts":"9223372036854775807"} 2024-11-21T00:29:30,502 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-21T00:29:30,503 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 21e23f12556a7b945d55cc2f3dac60b0, NAME => 'TestAcidGuarantees,,1732148943496.21e23f12556a7b945d55cc2f3dac60b0.', STARTKEY => '', ENDKEY => ''}] 2024-11-21T00:29:30,503 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-21T00:29:30,503 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732148970503"}]},"ts":"9223372036854775807"} 2024-11-21T00:29:30,505 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-21T00:29:30,520 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=152, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:30,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 55 msec 2024-11-21T00:29:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-21T00:29:30,567 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-11-21T00:29:30,577 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=240 (was 241), OpenFileDescriptor=455 (was 464), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=836 (was 799) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2260 (was 3387) 2024-11-21T00:29:30,586 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=240, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=836, ProcessCount=11, AvailableMemoryMB=2260 2024-11-21T00:29:30,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
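[Editor's note, not part of the log] The DISABLE (procId 148) and DELETE (procId 152) operations completed above are what the master runs when a client issues the corresponding Admin calls. A minimal sketch using the public HBase client API is shown below; the configuration and connection details are assumptions for illustration and are not taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Minimal sketch: disable and then delete a table through the Admin API.
    // Assumes a reachable cluster; values here are illustrative only.
    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                if (admin.tableExists(table)) {
                    admin.disableTable(table);  // triggers a DisableTableProcedure on the master, as logged above
                    admin.deleteTable(table);   // triggers a DeleteTableProcedure, which archives the region directories
                }
            }
        }
    }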
2024-11-21T00:29:30,588 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:29:30,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:30,589 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:29:30,589 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:30,589 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 153 2024-11-21T00:29:30,590 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:29:30,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-21T00:29:30,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742407_1583 (size=963) 2024-11-21T00:29:30,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-21T00:29:30,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-21T00:29:31,000 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f 2024-11-21T00:29:31,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742408_1584 (size=53) 2024-11-21T00:29:31,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-21T00:29:31,438 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:29:31,438 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing dd3ebec8f640ec7aa187d6eb7b835b19, disabling compactions & flushes 2024-11-21T00:29:31,438 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:31,438 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:31,438 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. after waiting 0 ms 2024-11-21T00:29:31,438 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:31,438 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
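The create request above (pid=153) corresponds roughly to building a descriptor with the ADAPTIVE compacting-memstore attribute and three single-version families A, B and C. A minimal sketch, assuming the HBase 2.x client API (helper name illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateTableExample {
      // Same layout the master logs above: ADAPTIVE in-memory compaction, families A/B/C, VERSIONS=1.
      static void createTestTable(Admin admin) throws IOException {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1) // VERSIONS => '1' in the descriptor above
              .build());
        }
        admin.createTable(builder.build()); // stored as CreateTableProcedure pid=153 above
      }
    }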
2024-11-21T00:29:31,438 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:31,439 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:29:31,440 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732148971439"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148971439"}]},"ts":"1732148971439"} 2024-11-21T00:29:31,441 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-21T00:29:31,441 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:29:31,442 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148971442"}]},"ts":"1732148971442"} 2024-11-21T00:29:31,444 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-21T00:29:31,462 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, ASSIGN}] 2024-11-21T00:29:31,474 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, ASSIGN 2024-11-21T00:29:31,475 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, ASSIGN; state=OFFLINE, location=0e7930017ff8,37961,1732148819586; forceNewPlan=false, retain=false 2024-11-21T00:29:31,626 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=dd3ebec8f640ec7aa187d6eb7b835b19, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:31,635 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; OpenRegionProcedure dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:29:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-21T00:29:31,799 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:31,822 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:31,823 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(7285): Opening region: {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:29:31,823 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:31,823 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:29:31,823 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(7327): checking encryption for dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:31,823 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(7330): checking classloading for dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:31,830 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:31,841 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:29:31,841 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dd3ebec8f640ec7aa187d6eb7b835b19 columnFamilyName A 2024-11-21T00:29:31,841 DEBUG [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:31,850 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(327): Store=dd3ebec8f640ec7aa187d6eb7b835b19/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:31,851 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:31,866 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:29:31,866 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dd3ebec8f640ec7aa187d6eb7b835b19 columnFamilyName B 2024-11-21T00:29:31,866 DEBUG [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:31,867 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(327): Store=dd3ebec8f640ec7aa187d6eb7b835b19/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:31,868 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:31,880 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:29:31,880 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dd3ebec8f640ec7aa187d6eb7b835b19 columnFamilyName C 2024-11-21T00:29:31,880 DEBUG [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:31,882 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(327): Store=dd3ebec8f640ec7aa187d6eb7b835b19/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:31,886 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:31,887 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:31,891 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:31,905 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T00:29:31,906 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1085): writing seq id for dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:31,928 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:29:31,928 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1102): Opened dd3ebec8f640ec7aa187d6eb7b835b19; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68287586, jitterRate=0.017564326524734497}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:29:31,929 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1001): Region open journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:31,929 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., pid=155, masterSystemTime=1732148971798 2024-11-21T00:29:31,934 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:31,934 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
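Each store above is opened as a CompactingMemStore with compactor=ADAPTIVE, inherited from the table-level 'hbase.hregion.compacting.memstore.type' attribute. The same policy can also be set per column family; a short sketch, assuming the HBase 2.x client API:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class InMemoryCompactionExample {
      // Per-family equivalent of the table-level ADAPTIVE setting seen in the store-open records above.
      static ColumnFamilyDescriptor adaptiveFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
      }
    }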
2024-11-21T00:29:31,942 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=dd3ebec8f640ec7aa187d6eb7b835b19, regionState=OPEN, openSeqNum=2, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:31,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-21T00:29:31,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; OpenRegionProcedure dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 in 308 msec 2024-11-21T00:29:31,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-21T00:29:31,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, ASSIGN in 483 msec 2024-11-21T00:29:31,947 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:29:31,947 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148971947"}]},"ts":"1732148971947"} 2024-11-21T00:29:31,949 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-21T00:29:31,968 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:29:31,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3800 sec 2024-11-21T00:29:32,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-21T00:29:32,702 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-11-21T00:29:32,704 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2089b1f4 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@55544bc7 2024-11-21T00:29:32,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3005670a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:32,761 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:32,762 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57994, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:32,763 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:32,764 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49046, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:29:32,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-21T00:29:32,766 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:29:32,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-21T00:29:32,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742409_1585 (size=999) 2024-11-21T00:29:32,777 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-21T00:29:32,777 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-21T00:29:32,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-21T00:29:32,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, REOPEN/MOVE}] 2024-11-21T00:29:32,782 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, REOPEN/MOVE 2024-11-21T00:29:32,784 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=dd3ebec8f640ec7aa187d6eb7b835b19, regionState=CLOSING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:32,785 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T00:29:32,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; CloseRegionProcedure dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:29:32,936 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:32,937 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:32,937 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-21T00:29:32,937 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing dd3ebec8f640ec7aa187d6eb7b835b19, disabling compactions & flushes 2024-11-21T00:29:32,937 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:32,937 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:32,937 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. after waiting 0 ms 2024-11-21T00:29:32,937 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
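The modify request above (pid=156) turns family 'A' into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is why the region is being closed here and reopened below. A minimal sketch of issuing the same change, assuming the HBase 2.x client API (helper name illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class EnableMobExample {
      // Mark family A as MOB with a 4-byte threshold, as in the ModifyTableProcedure above.
      static void enableMobOnFamilyA(Admin admin) throws IOException {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(name);
        TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
                .build())
            .build();
        admin.modifyTable(updated); // triggers the region reopen (pid=157/158) recorded below
      }
    }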
2024-11-21T00:29:32,958 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-21T00:29:32,967 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:32,967 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:32,967 WARN [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegionServer(3786): Not adding moved region record: dd3ebec8f640ec7aa187d6eb7b835b19 to self. 2024-11-21T00:29:32,969 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:32,969 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=dd3ebec8f640ec7aa187d6eb7b835b19, regionState=CLOSED 2024-11-21T00:29:32,972 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-11-21T00:29:32,972 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseRegionProcedure dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 in 185 msec 2024-11-21T00:29:32,973 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, REOPEN/MOVE; state=CLOSED, location=0e7930017ff8,37961,1732148819586; forceNewPlan=false, retain=true 2024-11-21T00:29:33,128 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=dd3ebec8f640ec7aa187d6eb7b835b19, regionState=OPENING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:33,138 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE; OpenRegionProcedure dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:29:33,292 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:33,306 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:33,307 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7285): Opening region: {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:29:33,307 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:33,307 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:29:33,307 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7327): checking encryption for dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:33,307 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7330): checking classloading for dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:33,309 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:33,310 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:29:33,310 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dd3ebec8f640ec7aa187d6eb7b835b19 columnFamilyName A 2024-11-21T00:29:33,312 DEBUG [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:33,318 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(327): Store=dd3ebec8f640ec7aa187d6eb7b835b19/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:33,319 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:33,320 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:29:33,321 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dd3ebec8f640ec7aa187d6eb7b835b19 columnFamilyName B 2024-11-21T00:29:33,321 DEBUG [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:33,322 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(327): Store=dd3ebec8f640ec7aa187d6eb7b835b19/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:33,322 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:33,323 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-21T00:29:33,323 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dd3ebec8f640ec7aa187d6eb7b835b19 columnFamilyName C 2024-11-21T00:29:33,323 DEBUG [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:33,324 INFO [StoreOpener-dd3ebec8f640ec7aa187d6eb7b835b19-1 {}] regionserver.HStore(327): Store=dd3ebec8f640ec7aa187d6eb7b835b19/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:33,324 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:33,325 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:33,326 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:33,329 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T00:29:33,331 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1085): writing seq id for dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:33,332 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1102): Opened dd3ebec8f640ec7aa187d6eb7b835b19; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66214624, jitterRate=-0.013325214385986328}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T00:29:33,333 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1001): Region open journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:33,339 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., pid=160, masterSystemTime=1732148973292 2024-11-21T00:29:33,342 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=dd3ebec8f640ec7aa187d6eb7b835b19, regionState=OPEN, openSeqNum=5, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:33,342 DEBUG [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:33,343 INFO [RS_OPEN_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
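The region is now reopened with MOB enabled; the records that follow show the test clients connecting, the master-initiated flush (pid=161), and writes rejected with RegionTooBusyException once the region's 512 K blocking memstore limit is exceeded. For context, a minimal client-side sketch of a flush plus a put retried on that exception, assuming the HBase 2.x client API (row key, column and backoff values are made up):

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class TooBusyRetryExample {
      // Flush the table, then retry a put with linear backoff if the region reports it is too busy.
      static void flushAndPut(Admin admin, Table table) throws IOException, InterruptedException {
        admin.flush(TableName.valueOf("TestAcidGuarantees")); // same request the master stores as FlushTableProcedure below
        Put put = new Put(Bytes.toBytes("test_row_0")) // hypothetical row key
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("q0"), Bytes.toBytes("value"));
        for (int attempt = 0; attempt < 5; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            Thread.sleep(100L * (attempt + 1)); // back off while the memstore drains
          }
        }
      }
    }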
2024-11-21T00:29:33,353 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=158 2024-11-21T00:29:33,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=158, state=SUCCESS; OpenRegionProcedure dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 in 213 msec 2024-11-21T00:29:33,359 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-21T00:29:33,359 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, REOPEN/MOVE in 573 msec 2024-11-21T00:29:33,362 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-21T00:29:33,362 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 581 msec 2024-11-21T00:29:33,365 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 597 msec 2024-11-21T00:29:33,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-21T00:29:33,368 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65f51785 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1208728f 2024-11-21T00:29:33,454 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@92e7af3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:33,456 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3cc71f2e to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d0a9e33 2024-11-21T00:29:33,495 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17899883, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:33,496 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79a7bd2b to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40e55f2a 2024-11-21T00:29:33,539 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b739a35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:33,540 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d688bcb to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@271e8143 2024-11-21T00:29:33,579 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20bb05a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:33,580 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31f7e171 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62b06a95 2024-11-21T00:29:33,607 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a5ecd59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:33,608 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d02ace0 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61da8c1c 2024-11-21T00:29:33,643 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b968040, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:33,644 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63054209 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560a8819 2024-11-21T00:29:33,691 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49019618, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:33,692 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fbb1399 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3df30e37 2024-11-21T00:29:33,721 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7887fec7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:33,721 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51fccca6 to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@745bf218 2024-11-21T00:29:33,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@336d4b92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:33,747 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x539997ae to 127.0.0.1:64241 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client 
config=org.apache.zookeeper.client.ZKClientConfig@78f964f7 2024-11-21T00:29:33,770 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@219191a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:33,777 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-21T00:29:33,778 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:33,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-21T00:29:33,778 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:33,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:33,791 DEBUG [hconnection-0x7cc4e1d4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:33,792 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:33,794 DEBUG [hconnection-0x6ccf4f1f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:33,794 DEBUG [hconnection-0x1e9f3f40-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:33,795 DEBUG [hconnection-0x58064183-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:33,795 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58020, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:33,795 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:33,799 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58034, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:33,811 DEBUG [hconnection-0x7a87702b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:33,813 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:58036, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:33,851 DEBUG [hconnection-0x12679c94-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:33,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:33,857 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:33,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-21T00:29:33,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:33,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:33,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:33,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:33,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:33,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:33,865 DEBUG [hconnection-0x4fc35d58-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:33,869 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:33,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-21T00:29:33,883 DEBUG [hconnection-0x3734b559-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:33,885 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58064, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:33,885 DEBUG [hconnection-0x2f50b1e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:33,886 DEBUG [hconnection-0x57d2ba92-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:33,887 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58080, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:33,891 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:33,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:33,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149033906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:33,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:33,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149033906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:33,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:33,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149033906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:33,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:33,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149033908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:33,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:33,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149033916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:33,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411216c43c4593c154b94afc2cf6c732ae0ab_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148973856/Put/seqid=0 2024-11-21T00:29:33,930 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:33,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-21T00:29:33,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:33,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:33,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:33,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:33,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:33,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742410_1586 (size=12154) 2024-11-21T00:29:33,978 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:34,002 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411216c43c4593c154b94afc2cf6c732ae0ab_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411216c43c4593c154b94afc2cf6c732ae0ab_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:34,004 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/f7745ef404204b1caf4ce131214c0eef, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:34,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/f7745ef404204b1caf4ce131214c0eef is 175, key is test_row_0/A:col10/1732148973856/Put/seqid=0 2024-11-21T00:29:34,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149034013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149034027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149034027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149034030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742411_1587 (size=30955) 2024-11-21T00:29:34,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149034036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,043 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/f7745ef404204b1caf4ce131214c0eef 2024-11-21T00:29:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-21T00:29:34,086 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-21T00:29:34,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:34,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:34,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:34,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:34,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:34,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:34,103 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-21T00:29:34,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/f3e62dacdd504bdbb88449f31b286b3c is 50, key is test_row_0/B:col10/1732148973856/Put/seqid=0 2024-11-21T00:29:34,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742412_1588 (size=12001) 2024-11-21T00:29:34,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/f3e62dacdd504bdbb88449f31b286b3c 2024-11-21T00:29:34,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/7a4a7db6182a4e809b3096ba20fa3ee7 is 50, key is test_row_0/C:col10/1732148973856/Put/seqid=0 2024-11-21T00:29:34,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742413_1589 (size=12001) 2024-11-21T00:29:34,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149034227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149034232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/7a4a7db6182a4e809b3096ba20fa3ee7 2024-11-21T00:29:34,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149034237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149034238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149034240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,249 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-21T00:29:34,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:34,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:34,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:34,250 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:34,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:34,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:34,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/f7745ef404204b1caf4ce131214c0eef as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/f7745ef404204b1caf4ce131214c0eef 2024-11-21T00:29:34,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/f7745ef404204b1caf4ce131214c0eef, entries=150, sequenceid=18, filesize=30.2 K 2024-11-21T00:29:34,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/f3e62dacdd504bdbb88449f31b286b3c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f3e62dacdd504bdbb88449f31b286b3c 2024-11-21T00:29:34,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f3e62dacdd504bdbb88449f31b286b3c, entries=150, sequenceid=18, filesize=11.7 K 2024-11-21T00:29:34,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/7a4a7db6182a4e809b3096ba20fa3ee7 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7a4a7db6182a4e809b3096ba20fa3ee7 2024-11-21T00:29:34,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-21T00:29:34,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7a4a7db6182a4e809b3096ba20fa3ee7, entries=150, sequenceid=18, filesize=11.7 K 2024-11-21T00:29:34,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for dd3ebec8f640ec7aa187d6eb7b835b19 in 539ms, sequenceid=18, compaction requested=false 2024-11-21T00:29:34,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:34,402 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-21T00:29:34,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:34,403 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:29:34,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:34,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:34,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:34,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:34,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:34,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:34,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112180d377a196cb42459bf069e72746e56a_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148973904/Put/seqid=0 2024-11-21T00:29:34,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742414_1590 (size=12154) 2024-11-21T00:29:34,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:34,488 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112180d377a196cb42459bf069e72746e56a_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112180d377a196cb42459bf069e72746e56a_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:34,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/c93c85d2bf32448098ce87406a4d25df, store: 
[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:34,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/c93c85d2bf32448098ce87406a4d25df is 175, key is test_row_0/A:col10/1732148973904/Put/seqid=0 2024-11-21T00:29:34,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742415_1591 (size=30955) 2024-11-21T00:29:34,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:34,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:34,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149034561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149034563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149034578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149034576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149034578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149034677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149034682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149034682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149034684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149034692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-21T00:29:34,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149034890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149034891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149034891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149034895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:34,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149034898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:34,923 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/c93c85d2bf32448098ce87406a4d25df 2024-11-21T00:29:34,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/ff58de0bae044831b113bb048c86f499 is 50, key is test_row_0/B:col10/1732148973904/Put/seqid=0 2024-11-21T00:29:34,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742416_1592 (size=12001) 2024-11-21T00:29:34,960 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/ff58de0bae044831b113bb048c86f499 2024-11-21T00:29:34,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/198495984d5741bcaf3477e374f2413b is 50, key is test_row_0/C:col10/1732148973904/Put/seqid=0 2024-11-21T00:29:35,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742417_1593 (size=12001) 2024-11-21T00:29:35,030 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/198495984d5741bcaf3477e374f2413b 2024-11-21T00:29:35,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/c93c85d2bf32448098ce87406a4d25df as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c93c85d2bf32448098ce87406a4d25df 2024-11-21T00:29:35,046 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c93c85d2bf32448098ce87406a4d25df, entries=150, sequenceid=41, filesize=30.2 K 2024-11-21T00:29:35,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/ff58de0bae044831b113bb048c86f499 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/ff58de0bae044831b113bb048c86f499 2024-11-21T00:29:35,053 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/ff58de0bae044831b113bb048c86f499, entries=150, sequenceid=41, filesize=11.7 K 2024-11-21T00:29:35,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/198495984d5741bcaf3477e374f2413b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/198495984d5741bcaf3477e374f2413b 2024-11-21T00:29:35,058 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/198495984d5741bcaf3477e374f2413b, entries=150, sequenceid=41, filesize=11.7 K 2024-11-21T00:29:35,059 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for dd3ebec8f640ec7aa187d6eb7b835b19 in 656ms, sequenceid=41, compaction requested=false 2024-11-21T00:29:35,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:35,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:35,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-21T00:29:35,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-21T00:29:35,061 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-21T00:29:35,061 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2810 sec 2024-11-21T00:29:35,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.2850 sec 2024-11-21T00:29:35,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:35,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-21T00:29:35,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:35,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:35,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:35,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:35,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:35,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:35,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411213d8dff9c68ba40e3b2c3244b428d2db6_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148975203/Put/seqid=0 2024-11-21T00:29:35,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149035229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149035232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149035232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149035240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149035241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742418_1594 (size=17034) 2024-11-21T00:29:35,283 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:35,292 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411213d8dff9c68ba40e3b2c3244b428d2db6_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411213d8dff9c68ba40e3b2c3244b428d2db6_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:35,293 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/d2dc478cef734ea9963f13cd3bdfdb77, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:35,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/d2dc478cef734ea9963f13cd3bdfdb77 is 175, key is test_row_0/A:col10/1732148975203/Put/seqid=0 2024-11-21T00:29:35,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is 
added to blk_1073742419_1595 (size=48139) 2024-11-21T00:29:35,341 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=58, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/d2dc478cef734ea9963f13cd3bdfdb77 2024-11-21T00:29:35,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149035346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149035346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149035349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149035350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149035355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/6b124858d73e4230ad0d0a95aeaa4082 is 50, key is test_row_0/B:col10/1732148975203/Put/seqid=0 2024-11-21T00:29:35,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742420_1596 (size=12001) 2024-11-21T00:29:35,424 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/6b124858d73e4230ad0d0a95aeaa4082 2024-11-21T00:29:35,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/43ae9e7a5e014415b341b81d739609fc is 50, key is test_row_0/C:col10/1732148975203/Put/seqid=0 2024-11-21T00:29:35,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742421_1597 (size=12001) 2024-11-21T00:29:35,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/43ae9e7a5e014415b341b81d739609fc 2024-11-21T00:29:35,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/d2dc478cef734ea9963f13cd3bdfdb77 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/d2dc478cef734ea9963f13cd3bdfdb77 2024-11-21T00:29:35,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/d2dc478cef734ea9963f13cd3bdfdb77, entries=250, sequenceid=58, filesize=47.0 K 
2024-11-21T00:29:35,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/6b124858d73e4230ad0d0a95aeaa4082 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6b124858d73e4230ad0d0a95aeaa4082 2024-11-21T00:29:35,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6b124858d73e4230ad0d0a95aeaa4082, entries=150, sequenceid=58, filesize=11.7 K 2024-11-21T00:29:35,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/43ae9e7a5e014415b341b81d739609fc as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/43ae9e7a5e014415b341b81d739609fc 2024-11-21T00:29:35,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/43ae9e7a5e014415b341b81d739609fc, entries=150, sequenceid=58, filesize=11.7 K 2024-11-21T00:29:35,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for dd3ebec8f640ec7aa187d6eb7b835b19 in 311ms, sequenceid=58, compaction requested=true 2024-11-21T00:29:35,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:35,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:35,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:35,514 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:35,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:35,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:35,514 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:35,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:35,514 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:35,515 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:35,515 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/A is initiating minor compaction (all files) 2024-11-21T00:29:35,515 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/A in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:35,515 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/f7745ef404204b1caf4ce131214c0eef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c93c85d2bf32448098ce87406a4d25df, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/d2dc478cef734ea9963f13cd3bdfdb77] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=107.5 K 2024-11-21T00:29:35,515 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:35,515 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/f7745ef404204b1caf4ce131214c0eef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c93c85d2bf32448098ce87406a4d25df, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/d2dc478cef734ea9963f13cd3bdfdb77] 2024-11-21T00:29:35,515 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:35,515 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/B is initiating minor compaction (all files) 2024-11-21T00:29:35,515 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7745ef404204b1caf4ce131214c0eef, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732148973850 2024-11-21T00:29:35,516 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/B in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:35,516 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f3e62dacdd504bdbb88449f31b286b3c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/ff58de0bae044831b113bb048c86f499, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6b124858d73e4230ad0d0a95aeaa4082] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=35.2 K 2024-11-21T00:29:35,516 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c93c85d2bf32448098ce87406a4d25df, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732148973896 2024-11-21T00:29:35,516 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f3e62dacdd504bdbb88449f31b286b3c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732148973850 2024-11-21T00:29:35,516 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2dc478cef734ea9963f13cd3bdfdb77, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1732148974561 2024-11-21T00:29:35,516 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ff58de0bae044831b113bb048c86f499, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732148973896 2024-11-21T00:29:35,517 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 
6b124858d73e4230ad0d0a95aeaa4082, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1732148974561 2024-11-21T00:29:35,524 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:35,542 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#B#compaction#513 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:35,542 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/846dbce7bd27463a9e24119056e62c08 is 50, key is test_row_0/B:col10/1732148975203/Put/seqid=0 2024-11-21T00:29:35,555 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121b6767d1f66be418695963d87ec41044a_dd3ebec8f640ec7aa187d6eb7b835b19 store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:35,558 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121b6767d1f66be418695963d87ec41044a_dd3ebec8f640ec7aa187d6eb7b835b19, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:35,558 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121b6767d1f66be418695963d87ec41044a_dd3ebec8f640ec7aa187d6eb7b835b19 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:35,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:35,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:29:35,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:35,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:35,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:35,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:35,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:35,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:35,610 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742422_1598 (size=12104) 2024-11-21T00:29:35,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742423_1599 (size=4469) 2024-11-21T00:29:35,642 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#A#compaction#512 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:35,643 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/846dbce7bd27463a9e24119056e62c08 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/846dbce7bd27463a9e24119056e62c08 2024-11-21T00:29:35,643 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/c08c55aca11048debcaff1dc4b32a4b1 is 175, key is test_row_0/A:col10/1732148975203/Put/seqid=0 2024-11-21T00:29:35,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121654de37b803c45fd912740dbddc4e739_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148975562/Put/seqid=0 2024-11-21T00:29:35,664 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/B of dd3ebec8f640ec7aa187d6eb7b835b19 into 846dbce7bd27463a9e24119056e62c08(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:35,664 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:35,664 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/B, priority=13, startTime=1732148975514; duration=0sec 2024-11-21T00:29:35,665 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:35,665 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:B 2024-11-21T00:29:35,665 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:35,666 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:35,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,666 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/C is initiating minor compaction (all files) 2024-11-21T00:29:35,666 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/C in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:35,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149035612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,666 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7a4a7db6182a4e809b3096ba20fa3ee7, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/198495984d5741bcaf3477e374f2413b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/43ae9e7a5e014415b341b81d739609fc] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=35.2 K 2024-11-21T00:29:35,666 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a4a7db6182a4e809b3096ba20fa3ee7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732148973850 2024-11-21T00:29:35,667 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 198495984d5741bcaf3477e374f2413b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732148973896 2024-11-21T00:29:35,667 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 43ae9e7a5e014415b341b81d739609fc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1732148974561 2024-11-21T00:29:35,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149035664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149035665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149035665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149035666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,710 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#C#compaction#515 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:35,711 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/e457bfdf6c384cd1bc358f4977cd1feb is 50, key is test_row_0/C:col10/1732148975203/Put/seqid=0 2024-11-21T00:29:35,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742425_1601 (size=12154) 2024-11-21T00:29:35,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742424_1600 (size=31058) 2024-11-21T00:29:35,721 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:35,725 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121654de37b803c45fd912740dbddc4e739_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121654de37b803c45fd912740dbddc4e739_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:35,726 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8c307c47d4d64b099348d8fb860e1d9b, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:35,726 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/c08c55aca11048debcaff1dc4b32a4b1 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c08c55aca11048debcaff1dc4b32a4b1 2024-11-21T00:29:35,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8c307c47d4d64b099348d8fb860e1d9b is 175, key is test_row_0/A:col10/1732148975562/Put/seqid=0 2024-11-21T00:29:35,730 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/A of dd3ebec8f640ec7aa187d6eb7b835b19 into c08c55aca11048debcaff1dc4b32a4b1(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:35,730 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:35,730 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/A, priority=13, startTime=1732148975514; duration=0sec 2024-11-21T00:29:35,731 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:35,731 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:A 2024-11-21T00:29:35,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149035767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742426_1602 (size=12104) 2024-11-21T00:29:35,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149035782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149035783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742427_1603 (size=30955) 2024-11-21T00:29:35,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149035782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149035791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-21T00:29:35,892 INFO [Thread-2589 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-21T00:29:35,897 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:35,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-21T00:29:35,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-21T00:29:35,913 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:35,922 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:35,923 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:35,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149035969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:35,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149035990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:35,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149035990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149036002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-21T00:29:36,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149036002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,078 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-21T00:29:36,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:36,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:36,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:36,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:36,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:36,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:36,191 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/e457bfdf6c384cd1bc358f4977cd1feb as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e457bfdf6c384cd1bc358f4977cd1feb 2024-11-21T00:29:36,196 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=83, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8c307c47d4d64b099348d8fb860e1d9b 2024-11-21T00:29:36,196 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/C of dd3ebec8f640ec7aa187d6eb7b835b19 into e457bfdf6c384cd1bc358f4977cd1feb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:36,196 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:36,196 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/C, priority=13, startTime=1732148975514; duration=0sec 2024-11-21T00:29:36,196 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:36,196 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:C 2024-11-21T00:29:36,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-21T00:29:36,231 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-21T00:29:36,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:36,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
as already flushing 2024-11-21T00:29:36,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:36,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:36,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:36,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:36,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/6d3fbf475b9f4ceda44a341b2e357d4b is 50, key is test_row_0/B:col10/1732148975562/Put/seqid=0 2024-11-21T00:29:36,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742428_1604 (size=12001) 2024-11-21T00:29:36,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=83 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/6d3fbf475b9f4ceda44a341b2e357d4b 2024-11-21T00:29:36,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c45b46c4674a4c51af0f51436b4904af is 50, key is test_row_0/C:col10/1732148975562/Put/seqid=0 2024-11-21T00:29:36,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149036276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742429_1605 (size=12001) 2024-11-21T00:29:36,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=83 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c45b46c4674a4c51af0f51436b4904af 2024-11-21T00:29:36,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149036298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8c307c47d4d64b099348d8fb860e1d9b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8c307c47d4d64b099348d8fb860e1d9b 2024-11-21T00:29:36,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149036298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149036307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8c307c47d4d64b099348d8fb860e1d9b, entries=150, sequenceid=83, filesize=30.2 K 2024-11-21T00:29:36,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/6d3fbf475b9f4ceda44a341b2e357d4b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6d3fbf475b9f4ceda44a341b2e357d4b 2024-11-21T00:29:36,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6d3fbf475b9f4ceda44a341b2e357d4b, entries=150, sequenceid=83, filesize=11.7 K 2024-11-21T00:29:36,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c45b46c4674a4c51af0f51436b4904af as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c45b46c4674a4c51af0f51436b4904af 2024-11-21T00:29:36,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c45b46c4674a4c51af0f51436b4904af, entries=150, sequenceid=83, filesize=11.7 K 2024-11-21T00:29:36,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for dd3ebec8f640ec7aa187d6eb7b835b19 in 758ms, sequenceid=83, compaction requested=false 2024-11-21T00:29:36,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:36,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-21T00:29:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:36,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:36,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121fc213ed2cabf427bacbaa7abb187665c_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148975619/Put/seqid=0 2024-11-21T00:29:36,391 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-21T00:29:36,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:36,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:36,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:36,393 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:36,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:36,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:36,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742430_1606 (size=12154) 2024-11-21T00:29:36,441 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:36,451 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121fc213ed2cabf427bacbaa7abb187665c_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121fc213ed2cabf427bacbaa7abb187665c_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:36,456 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/adf847e2881b43d0944fd9c2d333e30d, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:36,457 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/adf847e2881b43d0944fd9c2d333e30d is 175, key is test_row_0/A:col10/1732148975619/Put/seqid=0 2024-11-21T00:29:36,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742431_1607 (size=30955) 2024-11-21T00:29:36,505 INFO [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=100, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/adf847e2881b43d0944fd9c2d333e30d 2024-11-21T00:29:36,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-21T00:29:36,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/4e33ccdb19bc45608b7f0c3696474a1c is 50, key is test_row_0/B:col10/1732148975619/Put/seqid=0 2024-11-21T00:29:36,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149036531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,545 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-21T00:29:36,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:36,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:36,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:36,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:36,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:36,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:36,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742432_1608 (size=12001) 2024-11-21T00:29:36,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/4e33ccdb19bc45608b7f0c3696474a1c 2024-11-21T00:29:36,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/6330a4c1c29f4cb4b085f4bb3062f76c is 50, key is test_row_0/C:col10/1732148975619/Put/seqid=0 2024-11-21T00:29:36,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149036647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742433_1609 (size=12001) 2024-11-21T00:29:36,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/6330a4c1c29f4cb4b085f4bb3062f76c 2024-11-21T00:29:36,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/adf847e2881b43d0944fd9c2d333e30d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/adf847e2881b43d0944fd9c2d333e30d 2024-11-21T00:29:36,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/adf847e2881b43d0944fd9c2d333e30d, entries=150, sequenceid=100, filesize=30.2 K 2024-11-21T00:29:36,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/4e33ccdb19bc45608b7f0c3696474a1c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4e33ccdb19bc45608b7f0c3696474a1c 2024-11-21T00:29:36,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4e33ccdb19bc45608b7f0c3696474a1c, entries=150, sequenceid=100, filesize=11.7 K 2024-11-21T00:29:36,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/6330a4c1c29f4cb4b085f4bb3062f76c as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/6330a4c1c29f4cb4b085f4bb3062f76c 2024-11-21T00:29:36,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/6330a4c1c29f4cb4b085f4bb3062f76c, entries=150, sequenceid=100, filesize=11.7 K 2024-11-21T00:29:36,702 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-21T00:29:36,703 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for dd3ebec8f640ec7aa187d6eb7b835b19 in 370ms, sequenceid=100, compaction requested=true 2024-11-21T00:29:36,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:36,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:36,707 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:36,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:36,707 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:36,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:36,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:36,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:36,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:36,710 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:36,710 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/A is initiating minor compaction (all files) 2024-11-21T00:29:36,710 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 
dd3ebec8f640ec7aa187d6eb7b835b19/A in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:36,710 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c08c55aca11048debcaff1dc4b32a4b1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8c307c47d4d64b099348d8fb860e1d9b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/adf847e2881b43d0944fd9c2d333e30d] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=90.8 K 2024-11-21T00:29:36,710 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:36,710 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c08c55aca11048debcaff1dc4b32a4b1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8c307c47d4d64b099348d8fb860e1d9b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/adf847e2881b43d0944fd9c2d333e30d] 2024-11-21T00:29:36,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:36,711 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-21T00:29:36,711 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c08c55aca11048debcaff1dc4b32a4b1, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1732148974561 2024-11-21T00:29:36,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:36,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:36,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:36,711 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c307c47d4d64b099348d8fb860e1d9b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1732148975562 2024-11-21T00:29:36,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:36,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:36,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:36,712 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting adf847e2881b43d0944fd9c2d333e30d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732148975619 2024-11-21T00:29:36,716 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:36,716 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/B is initiating minor compaction (all files) 2024-11-21T00:29:36,716 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/B in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:36,716 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/846dbce7bd27463a9e24119056e62c08, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6d3fbf475b9f4ceda44a341b2e357d4b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4e33ccdb19bc45608b7f0c3696474a1c] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=35.3 K 2024-11-21T00:29:36,717 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 846dbce7bd27463a9e24119056e62c08, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1732148974561 2024-11-21T00:29:36,718 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d3fbf475b9f4ceda44a341b2e357d4b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1732148975562 2024-11-21T00:29:36,718 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e33ccdb19bc45608b7f0c3696474a1c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732148975619 2024-11-21T00:29:36,728 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:36,732 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#B#compaction#522 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:36,733 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/7435a7408d874d64b7ff3fffc2a7f3e6 is 50, key is test_row_0/B:col10/1732148975619/Put/seqid=0 2024-11-21T00:29:36,743 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112182031015eaaa41b39fa67f69dbc9c951_dd3ebec8f640ec7aa187d6eb7b835b19 store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:36,745 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112182031015eaaa41b39fa67f69dbc9c951_dd3ebec8f640ec7aa187d6eb7b835b19, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:36,745 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112182031015eaaa41b39fa67f69dbc9c951_dd3ebec8f640ec7aa187d6eb7b835b19 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:36,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:36,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:36,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742434_1610 (size=12207) 2024-11-21T00:29:36,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112188e0391ab0844e3b86f55dfec0410932_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148976529/Put/seqid=0 2024-11-21T00:29:36,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742435_1611 (size=4469) 2024-11-21T00:29:36,836 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#A#compaction#521 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:36,836 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/29aed4ee3d604f18b89d8686b0fc9582 is 175, key is test_row_0/A:col10/1732148975619/Put/seqid=0 2024-11-21T00:29:36,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742436_1612 (size=12154) 2024-11-21T00:29:36,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:36,853 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112188e0391ab0844e3b86f55dfec0410932_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112188e0391ab0844e3b86f55dfec0410932_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:36,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149036840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149036841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8e72debc654f4aae9b1f7a2b92235dee, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:36,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8e72debc654f4aae9b1f7a2b92235dee is 175, key is test_row_0/A:col10/1732148976529/Put/seqid=0 2024-11-21T00:29:36,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149036853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149036853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149036857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742437_1613 (size=31161) 2024-11-21T00:29:36,884 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/29aed4ee3d604f18b89d8686b0fc9582 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/29aed4ee3d604f18b89d8686b0fc9582 2024-11-21T00:29:36,894 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/A of dd3ebec8f640ec7aa187d6eb7b835b19 into 29aed4ee3d604f18b89d8686b0fc9582(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:36,895 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:36,895 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/A, priority=13, startTime=1732148976706; duration=0sec 2024-11-21T00:29:36,895 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:36,895 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:A 2024-11-21T00:29:36,895 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:36,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742438_1614 (size=30955) 2024-11-21T00:29:36,896 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=122, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8e72debc654f4aae9b1f7a2b92235dee 2024-11-21T00:29:36,903 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:36,903 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/C is initiating minor compaction (all files) 2024-11-21T00:29:36,903 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/C in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:36,903 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e457bfdf6c384cd1bc358f4977cd1feb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c45b46c4674a4c51af0f51436b4904af, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/6330a4c1c29f4cb4b085f4bb3062f76c] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=35.3 K 2024-11-21T00:29:36,907 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting e457bfdf6c384cd1bc358f4977cd1feb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1732148974561 2024-11-21T00:29:36,911 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c45b46c4674a4c51af0f51436b4904af, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1732148975562 2024-11-21T00:29:36,912 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6330a4c1c29f4cb4b085f4bb3062f76c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732148975619 2024-11-21T00:29:36,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/2d5ad2c14e81492eadc6e6f2fe8d7cda is 50, key is test_row_0/B:col10/1732148976529/Put/seqid=0 2024-11-21T00:29:36,944 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#C#compaction#525 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:36,945 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/45fe840fdfc3487da2e551b280d4088a is 50, key is test_row_0/C:col10/1732148975619/Put/seqid=0 2024-11-21T00:29:36,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149036955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149036955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:36,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149036975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149036975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:36,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742439_1615 (size=12001) 2024-11-21T00:29:37,011 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/2d5ad2c14e81492eadc6e6f2fe8d7cda 2024-11-21T00:29:37,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-21T00:29:37,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742440_1616 (size=12207) 2024-11-21T00:29:37,072 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/45fe840fdfc3487da2e551b280d4088a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/45fe840fdfc3487da2e551b280d4088a 2024-11-21T00:29:37,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c12655c2e85c43a4b4611219b65adb2c is 50, key is test_row_0/C:col10/1732148976529/Put/seqid=0 2024-11-21T00:29:37,094 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/C of dd3ebec8f640ec7aa187d6eb7b835b19 into 45fe840fdfc3487da2e551b280d4088a(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:37,094 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:37,094 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/C, priority=13, startTime=1732148976707; duration=0sec 2024-11-21T00:29:37,094 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:37,094 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:C 2024-11-21T00:29:37,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742441_1617 (size=12001) 2024-11-21T00:29:37,125 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c12655c2e85c43a4b4611219b65adb2c 2024-11-21T00:29:37,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8e72debc654f4aae9b1f7a2b92235dee as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8e72debc654f4aae9b1f7a2b92235dee 2024-11-21T00:29:37,150 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8e72debc654f4aae9b1f7a2b92235dee, entries=150, sequenceid=122, filesize=30.2 K 2024-11-21T00:29:37,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/2d5ad2c14e81492eadc6e6f2fe8d7cda as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2d5ad2c14e81492eadc6e6f2fe8d7cda 2024-11-21T00:29:37,159 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2d5ad2c14e81492eadc6e6f2fe8d7cda, entries=150, sequenceid=122, filesize=11.7 K 2024-11-21T00:29:37,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c12655c2e85c43a4b4611219b65adb2c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c12655c2e85c43a4b4611219b65adb2c 2024-11-21T00:29:37,164 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c12655c2e85c43a4b4611219b65adb2c, entries=150, sequenceid=122, filesize=11.7 K 2024-11-21T00:29:37,165 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for dd3ebec8f640ec7aa187d6eb7b835b19 in 455ms, sequenceid=122, compaction requested=false 2024-11-21T00:29:37,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:37,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:37,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-21T00:29:37,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-21T00:29:37,170 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-21T00:29:37,170 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2430 sec 2024-11-21T00:29:37,172 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.2730 sec 2024-11-21T00:29:37,177 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-21T00:29:37,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:37,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:37,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:37,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:37,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:37,177 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:37,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:37,204 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/7435a7408d874d64b7ff3fffc2a7f3e6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7435a7408d874d64b7ff3fffc2a7f3e6 2024-11-21T00:29:37,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121c2f1d26c273e4099a3a7a2b85cb38f83_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148976842/Put/seqid=0 2024-11-21T00:29:37,211 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/B of dd3ebec8f640ec7aa187d6eb7b835b19 into 7435a7408d874d64b7ff3fffc2a7f3e6(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:37,211 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:37,211 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/B, priority=13, startTime=1732148976707; duration=0sec 2024-11-21T00:29:37,211 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:37,211 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:B 2024-11-21T00:29:37,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149037237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149037244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149037251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149037259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149037259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742442_1618 (size=14794) 2024-11-21T00:29:37,276 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:37,284 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121c2f1d26c273e4099a3a7a2b85cb38f83_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121c2f1d26c273e4099a3a7a2b85cb38f83_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:37,295 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/4f9adc1bfec24856b88034c8ad18f996, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:37,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/4f9adc1bfec24856b88034c8ad18f996 is 175, key is test_row_0/A:col10/1732148976842/Put/seqid=0 2024-11-21T00:29:37,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is 
added to blk_1073742443_1619 (size=39749) 2024-11-21T00:29:37,340 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=139, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/4f9adc1bfec24856b88034c8ad18f996 2024-11-21T00:29:37,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149037353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/621dcaa494634f21bd7b9c33a7fffa79 is 50, key is test_row_0/B:col10/1732148976842/Put/seqid=0 2024-11-21T00:29:37,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149037361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149037362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149037363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149037362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742444_1620 (size=12151) 2024-11-21T00:29:37,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149037569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149037569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149037571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149037571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149037580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/621dcaa494634f21bd7b9c33a7fffa79 2024-11-21T00:29:37,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/e79feb3262f84cfab9d8c2fcedbcbf29 is 50, key is test_row_0/C:col10/1732148976842/Put/seqid=0 2024-11-21T00:29:37,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742445_1621 (size=12151) 2024-11-21T00:29:37,857 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/e79feb3262f84cfab9d8c2fcedbcbf29 2024-11-21T00:29:37,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/4f9adc1bfec24856b88034c8ad18f996 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/4f9adc1bfec24856b88034c8ad18f996 2024-11-21T00:29:37,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/4f9adc1bfec24856b88034c8ad18f996, entries=200, sequenceid=139, filesize=38.8 K 2024-11-21T00:29:37,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149037875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149037875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/621dcaa494634f21bd7b9c33a7fffa79 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/621dcaa494634f21bd7b9c33a7fffa79 2024-11-21T00:29:37,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/621dcaa494634f21bd7b9c33a7fffa79, entries=150, sequenceid=139, filesize=11.9 K 2024-11-21T00:29:37,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/e79feb3262f84cfab9d8c2fcedbcbf29 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e79feb3262f84cfab9d8c2fcedbcbf29 2024-11-21T00:29:37,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149037879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149037886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:37,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149037888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:37,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e79feb3262f84cfab9d8c2fcedbcbf29, entries=150, sequenceid=139, filesize=11.9 K 2024-11-21T00:29:37,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=134.18 KB/137400 for dd3ebec8f640ec7aa187d6eb7b835b19 in 718ms, sequenceid=139, compaction requested=true 2024-11-21T00:29:37,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:37,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:37,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:37,895 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:37,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:37,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:37,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:37,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:37,895 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:37,896 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:37,896 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/A is initiating minor compaction (all files) 2024-11-21T00:29:37,896 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/A in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:37,896 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/29aed4ee3d604f18b89d8686b0fc9582, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8e72debc654f4aae9b1f7a2b92235dee, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/4f9adc1bfec24856b88034c8ad18f996] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=99.5 K 2024-11-21T00:29:37,896 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:37,896 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/29aed4ee3d604f18b89d8686b0fc9582, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8e72debc654f4aae9b1f7a2b92235dee, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/4f9adc1bfec24856b88034c8ad18f996] 2024-11-21T00:29:37,896 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:37,896 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/B is initiating minor compaction (all files) 2024-11-21T00:29:37,897 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/B in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:37,897 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7435a7408d874d64b7ff3fffc2a7f3e6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2d5ad2c14e81492eadc6e6f2fe8d7cda, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/621dcaa494634f21bd7b9c33a7fffa79] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=35.5 K 2024-11-21T00:29:37,897 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29aed4ee3d604f18b89d8686b0fc9582, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732148975619 2024-11-21T00:29:37,897 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 7435a7408d874d64b7ff3fffc2a7f3e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732148975619 2024-11-21T00:29:37,897 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e72debc654f4aae9b1f7a2b92235dee, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732148976463 2024-11-21T00:29:37,897 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d5ad2c14e81492eadc6e6f2fe8d7cda, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732148976463 2024-11-21T00:29:37,898 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f9adc1bfec24856b88034c8ad18f996, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732148976837 2024-11-21T00:29:37,898 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 
621dcaa494634f21bd7b9c33a7fffa79, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732148976842 2024-11-21T00:29:37,915 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#B#compaction#530 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:37,915 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/38729516b2874b98bf7738d5a2d1a930 is 50, key is test_row_0/B:col10/1732148976842/Put/seqid=0 2024-11-21T00:29:37,923 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:37,932 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112167369a3d78cc4787b5883871d8b5569c_dd3ebec8f640ec7aa187d6eb7b835b19 store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:37,934 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112167369a3d78cc4787b5883871d8b5569c_dd3ebec8f640ec7aa187d6eb7b835b19, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:37,935 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112167369a3d78cc4787b5883871d8b5569c_dd3ebec8f640ec7aa187d6eb7b835b19 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:37,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742446_1622 (size=12459) 2024-11-21T00:29:38,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742447_1623 (size=4469) 2024-11-21T00:29:38,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-21T00:29:38,023 INFO [Thread-2589 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-21T00:29:38,024 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:38,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-21T00:29:38,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=165 2024-11-21T00:29:38,026 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:38,027 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:38,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-21T00:29:38,179 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-21T00:29:38,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:38,180 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:29:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:38,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121d321c411e4c044a2a261e1d438379b3e_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is 
test_row_0/A:col10/1732148977245/Put/seqid=0 2024-11-21T00:29:38,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742448_1624 (size=12304) 2024-11-21T00:29:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-21T00:29:38,365 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/38729516b2874b98bf7738d5a2d1a930 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/38729516b2874b98bf7738d5a2d1a930 2024-11-21T00:29:38,371 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/B of dd3ebec8f640ec7aa187d6eb7b835b19 into 38729516b2874b98bf7738d5a2d1a930(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:38,371 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:38,371 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/B, priority=13, startTime=1732148977895; duration=0sec 2024-11-21T00:29:38,371 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:38,371 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:B 2024-11-21T00:29:38,371 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:38,373 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:38,374 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/C is initiating minor compaction (all files) 2024-11-21T00:29:38,374 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/C in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:38,374 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/45fe840fdfc3487da2e551b280d4088a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c12655c2e85c43a4b4611219b65adb2c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e79feb3262f84cfab9d8c2fcedbcbf29] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=35.5 K 2024-11-21T00:29:38,374 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 45fe840fdfc3487da2e551b280d4088a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732148975619 2024-11-21T00:29:38,374 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c12655c2e85c43a4b4611219b65adb2c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732148976463 2024-11-21T00:29:38,375 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e79feb3262f84cfab9d8c2fcedbcbf29, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732148976842 2024-11-21T00:29:38,385 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#C#compaction#533 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:38,385 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/adfa1f2578d844488d165e4218871361 is 50, key is test_row_0/C:col10/1732148976842/Put/seqid=0 2024-11-21T00:29:38,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
as already flushing 2024-11-21T00:29:38,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:38,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742449_1625 (size=12459) 2024-11-21T00:29:38,412 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/adfa1f2578d844488d165e4218871361 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/adfa1f2578d844488d165e4218871361 2024-11-21T00:29:38,418 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#A#compaction#531 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:38,419 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/244df12bc8074116b21bb27c18c0c274 is 175, key is test_row_0/A:col10/1732148976842/Put/seqid=0 2024-11-21T00:29:38,424 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/C of dd3ebec8f640ec7aa187d6eb7b835b19 into adfa1f2578d844488d165e4218871361(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:38,424 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:38,424 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/C, priority=13, startTime=1732148977895; duration=0sec 2024-11-21T00:29:38,424 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:38,424 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:C 2024-11-21T00:29:38,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149038420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149038420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149038424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149038435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149038436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742450_1626 (size=31413) 2024-11-21T00:29:38,465 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/244df12bc8074116b21bb27c18c0c274 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/244df12bc8074116b21bb27c18c0c274 2024-11-21T00:29:38,472 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/A of dd3ebec8f640ec7aa187d6eb7b835b19 into 244df12bc8074116b21bb27c18c0c274(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:38,472 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:38,472 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/A, priority=13, startTime=1732148977895; duration=0sec 2024-11-21T00:29:38,472 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:38,472 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:A 2024-11-21T00:29:38,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149038539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149038540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149038542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149038551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149038556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:38,612 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121d321c411e4c044a2a261e1d438379b3e_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d321c411e4c044a2a261e1d438379b3e_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:38,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/e79a9765e968403eade36e08118964ff, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:38,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/e79a9765e968403eade36e08118964ff is 175, key is test_row_0/A:col10/1732148977245/Put/seqid=0 2024-11-21T00:29:38,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-21T00:29:38,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742451_1627 (size=31105) 2024-11-21T00:29:38,675 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=163, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/e79a9765e968403eade36e08118964ff 
2024-11-21T00:29:38,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/53a7625f6e504c948bb3ea9e3199937b is 50, key is test_row_0/B:col10/1732148977245/Put/seqid=0 2024-11-21T00:29:38,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742452_1628 (size=12151) 2024-11-21T00:29:38,743 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/53a7625f6e504c948bb3ea9e3199937b 2024-11-21T00:29:38,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149038743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149038743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149038759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149038755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:38,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149038767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:38,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/921e90e7569a4e599845f1ae2f78de8b is 50, key is test_row_0/C:col10/1732148977245/Put/seqid=0 2024-11-21T00:29:38,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742453_1629 (size=12151) 2024-11-21T00:29:38,968 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-21T00:29:38,968 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-21T00:29:39,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149039055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149039067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149039071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149039071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149039079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-21T00:29:39,267 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/921e90e7569a4e599845f1ae2f78de8b 2024-11-21T00:29:39,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/e79a9765e968403eade36e08118964ff as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e79a9765e968403eade36e08118964ff 2024-11-21T00:29:39,289 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e79a9765e968403eade36e08118964ff, entries=150, sequenceid=163, filesize=30.4 K 2024-11-21T00:29:39,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/53a7625f6e504c948bb3ea9e3199937b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/53a7625f6e504c948bb3ea9e3199937b 2024-11-21T00:29:39,312 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/53a7625f6e504c948bb3ea9e3199937b, entries=150, sequenceid=163, filesize=11.9 K 2024-11-21T00:29:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/921e90e7569a4e599845f1ae2f78de8b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/921e90e7569a4e599845f1ae2f78de8b 2024-11-21T00:29:39,339 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/921e90e7569a4e599845f1ae2f78de8b, entries=150, sequenceid=163, filesize=11.9 K 2024-11-21T00:29:39,341 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for dd3ebec8f640ec7aa187d6eb7b835b19 in 1162ms, sequenceid=163, compaction requested=false 2024-11-21T00:29:39,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:39,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:39,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-21T00:29:39,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-21T00:29:39,349 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-21T00:29:39,349 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3210 sec 2024-11-21T00:29:39,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.3260 sec 2024-11-21T00:29:39,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-21T00:29:39,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:39,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:39,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:39,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:39,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:39,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:39,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:39,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411215c950a82175b4637848bd87af2bd3043_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148979566/Put/seqid=0 2024-11-21T00:29:39,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742454_1630 (size=14794) 2024-11-21T00:29:39,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149039658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149039658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149039671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149039670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149039675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149039772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149039776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149039776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149039782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149039787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149039983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149039986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149039990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:39,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149039991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149040004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,029 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:40,032 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411215c950a82175b4637848bd87af2bd3043_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411215c950a82175b4637848bd87af2bd3043_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:40,034 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/b17c35b70ebb49ef9eced82d561df309, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:40,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/b17c35b70ebb49ef9eced82d561df309 is 175, key is test_row_0/A:col10/1732148979566/Put/seqid=0 2024-11-21T00:29:40,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742455_1631 (size=39749) 2024-11-21T00:29:40,079 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=181, 
memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/b17c35b70ebb49ef9eced82d561df309 2024-11-21T00:29:40,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/0b39a5b87bbf4d7abafc1dc0a4267a69 is 50, key is test_row_0/B:col10/1732148979566/Put/seqid=0 2024-11-21T00:29:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-21T00:29:40,131 INFO [Thread-2589 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-21T00:29:40,132 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-21T00:29:40,134 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:40,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-21T00:29:40,136 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:40,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:40,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742456_1632 (size=12151) 2024-11-21T00:29:40,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-21T00:29:40,295 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-21T00:29:40,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:40,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
as already flushing 2024-11-21T00:29:40,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:40,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:40,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:40,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:40,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149040299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149040299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149040299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149040303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149040311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-21T00:29:40,452 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-21T00:29:40,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:40,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:40,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:40,455 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:40,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:40,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:40,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/0b39a5b87bbf4d7abafc1dc0a4267a69 2024-11-21T00:29:40,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/4fa4ec090eea4296a86b3bea99d425f9 is 50, key is test_row_0/C:col10/1732148979566/Put/seqid=0 2024-11-21T00:29:40,610 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-21T00:29:40,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:40,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:40,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
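The repeated RegionTooBusyException entries above come from HRegion.checkResources, which rejects mutations once the region's memstore passes a blocking threshold (reported here as "Over memstore limit=512.0 K"). In stock HBase that threshold is derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, so a deliberately small flush size (e.g. 128 K x 4 = 512 K) would match the figure seen in this run; the exact test configuration is not shown in the log. Below is a minimal, self-contained sketch of that write gate; class and method names are hypothetical and do not mirror HBase's internal API.

// Simplified sketch of a memstore write gate, assuming a blocking limit of
// flushSize * blockMultiplier. Illustrative only; not HBase internals.
public class MemstoreGateSketch {

    /** Thrown when the region's in-memory data exceeds the blocking limit. */
    static class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
    }

    private final long blockingLimitBytes;
    private long memstoreSizeBytes;

    MemstoreGateSketch(long flushSizeBytes, int blockMultiplier) {
        // e.g. 128 K * 4 = 512 K, which would match the limit reported in this log.
        this.blockingLimitBytes = flushSizeBytes * blockMultiplier;
    }

    /** Called before applying a mutation; plays the role of checkResources. */
    void checkResources(String regionName) {
        if (memstoreSizeBytes > blockingLimitBytes) {
            // The real code path also triggers a flush request before throwing.
            throw new RegionTooBusy("Over memstore limit=" + (blockingLimitBytes / 1024)
                + " K, regionName=" + regionName);
        }
    }

    void put(String regionName, long mutationBytes) {
        checkResources(regionName);
        memstoreSizeBytes += mutationBytes;   // mutation accepted into the memstore
    }

    void flushed() {
        memstoreSizeBytes = 0;                // a completed flush drains the memstore
    }

    public static void main(String[] args) {
        MemstoreGateSketch gate = new MemstoreGateSketch(128 * 1024, 4);
        for (int i = 0; ; i++) {
            try {
                // ~4.7 K per Mutate, as reported for the rejected calls in the log.
                gate.put("dd3ebec8f640ec7aa187d6eb7b835b19", 4_700);
            } catch (RegionTooBusy e) {
                System.out.println("rejected after " + i + " puts: " + e.getMessage());
                break;
            }
        }
    }
}

Once the limit is crossed, every further Mutate on the region fails fast with the same exception until a flush completes, which is why the WARN/DEBUG pairs above repeat for each blocked client connection.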
2024-11-21T00:29:40,613 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:40,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:40,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:40,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742457_1633 (size=12151) 2024-11-21T00:29:40,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/4fa4ec090eea4296a86b3bea99d425f9 2024-11-21T00:29:40,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/b17c35b70ebb49ef9eced82d561df309 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b17c35b70ebb49ef9eced82d561df309 2024-11-21T00:29:40,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b17c35b70ebb49ef9eced82d561df309, entries=200, sequenceid=181, filesize=38.8 K 2024-11-21T00:29:40,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/0b39a5b87bbf4d7abafc1dc0a4267a69 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/0b39a5b87bbf4d7abafc1dc0a4267a69 2024-11-21T00:29:40,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/0b39a5b87bbf4d7abafc1dc0a4267a69, entries=150, sequenceid=181, filesize=11.9 K 2024-11-21T00:29:40,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/4fa4ec090eea4296a86b3bea99d425f9 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4fa4ec090eea4296a86b3bea99d425f9 2024-11-21T00:29:40,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4fa4ec090eea4296a86b3bea99d425f9, entries=150, sequenceid=181, filesize=11.9 K 2024-11-21T00:29:40,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for dd3ebec8f640ec7aa187d6eb7b835b19 in 1123ms, sequenceid=181, compaction requested=true 2024-11-21T00:29:40,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:40,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:40,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:40,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:40,694 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:40,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:40,694 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:40,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:40,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:40,702 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:40,702 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/B is initiating minor 
compaction (all files) 2024-11-21T00:29:40,702 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/B in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:40,702 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/38729516b2874b98bf7738d5a2d1a930, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/53a7625f6e504c948bb3ea9e3199937b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/0b39a5b87bbf4d7abafc1dc0a4267a69] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=35.9 K 2024-11-21T00:29:40,703 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:40,704 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/A is initiating minor compaction (all files) 2024-11-21T00:29:40,704 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/A in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:40,704 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/244df12bc8074116b21bb27c18c0c274, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e79a9765e968403eade36e08118964ff, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b17c35b70ebb49ef9eced82d561df309] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=99.9 K 2024-11-21T00:29:40,704 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:40,704 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/244df12bc8074116b21bb27c18c0c274, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e79a9765e968403eade36e08118964ff, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b17c35b70ebb49ef9eced82d561df309] 2024-11-21T00:29:40,704 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 38729516b2874b98bf7738d5a2d1a930, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732148976842 2024-11-21T00:29:40,706 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 244df12bc8074116b21bb27c18c0c274, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732148976842 2024-11-21T00:29:40,706 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 53a7625f6e504c948bb3ea9e3199937b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732148977237 2024-11-21T00:29:40,708 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting e79a9765e968403eade36e08118964ff, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732148977237 2024-11-21T00:29:40,708 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b39a5b87bbf4d7abafc1dc0a4267a69, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148978426 2024-11-21T00:29:40,708 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting b17c35b70ebb49ef9eced82d561df309, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148978384 2024-11-21T00:29:40,743 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:40,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-21T00:29:40,746 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121036b0ee1ca1f4d98a9645eefdd55bdc8_dd3ebec8f640ec7aa187d6eb7b835b19 store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:40,747 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#B#compaction#540 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:40,748 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/e84f647806e74f98b20189f7e304465b is 50, key is test_row_0/B:col10/1732148979566/Put/seqid=0 2024-11-21T00:29:40,748 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121036b0ee1ca1f4d98a9645eefdd55bdc8_dd3ebec8f640ec7aa187d6eb7b835b19, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:40,749 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121036b0ee1ca1f4d98a9645eefdd55bdc8_dd3ebec8f640ec7aa187d6eb7b835b19 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:40,769 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-21T00:29:40,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
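The pid=168 failures earlier in this stretch are the master's remote FlushRegionProcedure being bounced while the MemStoreFlusher is still mid-flush: the region server logs "NOT flushing ... as already flushing", the callable raises IOException("Unable to complete flush ..."), the master records "Remote procedure failed, pid=168", and the dispatcher sends the procedure again; the attempt starting just above finally lands after the flusher has finished and can run the flush itself on the next line. A rough, hypothetical retry loop illustrating that shape follows; it is not the HBase procedure-v2 framework, just an analogy with invented names.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical sketch: a coordinator re-dispatches a region flush that the
// server rejects while an earlier flush is still running.
public class FlushRetrySketch {

    static class Region {
        final AtomicBoolean flushing = new AtomicBoolean(true); // background flusher already at work

        void flushFromProcedure() throws IOException {
            if (!flushing.compareAndSet(false, true)) {
                // Mirrors "NOT flushing ... as already flushing" followed by
                // IOException("Unable to complete flush ...") in the log.
                throw new IOException("Unable to complete flush, region already flushing");
            }
            try {
                System.out.println("procedure-driven flush ran");
            } finally {
                flushing.set(false);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Region region = new Region();

        // Simulate the background flusher finishing its own flush a bit later.
        new Thread(() -> {
            try { Thread.sleep(300); } catch (InterruptedException ignored) { }
            region.flushing.set(false);
        }).start();

        // Coordinator side: keep re-dispatching until the region accepts the flush.
        for (int attempt = 1; ; attempt++) {
            try {
                region.flushFromProcedure();
                System.out.println("attempt " + attempt + " succeeded");
                break;
            } catch (IOException e) {
                System.out.println("attempt " + attempt + " failed: " + e.getMessage());
                Thread.sleep(100); // re-dispatch after a short delay
            }
        }
    }
}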
2024-11-21T00:29:40,770 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-21T00:29:40,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:40,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:40,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:40,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:40,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:40,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:40,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742459_1635 (size=4469) 2024-11-21T00:29:40,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742458_1634 (size=12561) 2024-11-21T00:29:40,799 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/e84f647806e74f98b20189f7e304465b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/e84f647806e74f98b20189f7e304465b 2024-11-21T00:29:40,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411216a516165d18545b6851474735b6fcdf5_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148979628/Put/seqid=0 2024-11-21T00:29:40,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:40,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:40,814 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/B of dd3ebec8f640ec7aa187d6eb7b835b19 into e84f647806e74f98b20189f7e304465b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
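Each "Exploring compaction algorithm has selected ..." line above is the store picking a contiguous run of HFiles whose sizes are close enough to be worth merging: the exploring policy accepts a candidate set only if no file is larger than the compaction ratio (1.2 by default) times the combined size of the others. The stripped-down selection below reproduces that ratio test under those default assumptions; names are illustrative, not the real policy classes, and the file sizes are taken roughly from the B-family totals in the log.

import java.util.ArrayList;
import java.util.List;

// Stripped-down sketch of ratio-based compaction selection, loosely modeled on
// the behaviour visible in the log. Not the actual ExploringCompactionPolicy.
public class CompactionSelectionSketch {

    /** True if every file is <= ratio * (sum of the other files in the set). */
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    /** Pick the first contiguous run of minFiles..maxFiles files passing the ratio test. */
    static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
        for (int start = 0; start < sizes.size(); start++) {
            int limit = Math.min(sizes.size(), start + maxFiles);
            for (int end = start + minFiles; end <= limit; end++) {
                List<Long> candidate = new ArrayList<>(sizes.subList(start, end));
                if (filesInRatio(candidate, ratio)) {
                    return candidate;
                }
            }
        }
        return List.of();
    }

    public static void main(String[] args) {
        // Approximately the three B-family files above: 12.2 K + 11.9 K + 11.9 K ~ 36761 bytes.
        List<Long> storeFiles = List.of(12_459L, 12_151L, 12_151L);
        List<Long> picked = select(storeFiles, 3, 10, 1.2);
        System.out.println("selected " + picked.size() + " files, total "
            + picked.stream().mapToLong(Long::longValue).sum() + " bytes");
    }
}

With three similarly sized files the ratio test passes on the first permutation, which matches the "after considering 1 permutations with 1 in ratio" wording in the selection lines above and below.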
2024-11-21T00:29:40,814 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:40,814 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/B, priority=13, startTime=1732148980694; duration=0sec 2024-11-21T00:29:40,814 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:40,814 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:B 2024-11-21T00:29:40,814 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:40,815 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:40,815 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/C is initiating minor compaction (all files) 2024-11-21T00:29:40,815 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/C in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:40,815 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/adfa1f2578d844488d165e4218871361, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/921e90e7569a4e599845f1ae2f78de8b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4fa4ec090eea4296a86b3bea99d425f9] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=35.9 K 2024-11-21T00:29:40,816 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting adfa1f2578d844488d165e4218871361, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732148976842 2024-11-21T00:29:40,816 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 921e90e7569a4e599845f1ae2f78de8b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732148977237 2024-11-21T00:29:40,817 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fa4ec090eea4296a86b3bea99d425f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148978426 2024-11-21T00:29:40,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149040835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149040835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742460_1636 (size=12304) 2024-11-21T00:29:40,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:40,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149040843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,851 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#C#compaction#542 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:40,852 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/45f6801f484745f49b5d040f3bfb5768 is 50, key is test_row_0/C:col10/1732148979566/Put/seqid=0 2024-11-21T00:29:40,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149040846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,857 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411216a516165d18545b6851474735b6fcdf5_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411216a516165d18545b6851474735b6fcdf5_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:40,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149040846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/09638a375a884938b91f8aec7ed5edd1, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:40,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/09638a375a884938b91f8aec7ed5edd1 is 175, key is test_row_0/A:col10/1732148979628/Put/seqid=0 2024-11-21T00:29:40,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742461_1637 (size=12561) 2024-11-21T00:29:40,915 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/45f6801f484745f49b5d040f3bfb5768 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/45f6801f484745f49b5d040f3bfb5768 2024-11-21T00:29:40,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742462_1638 (size=31105) 2024-11-21T00:29:40,936 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/C of dd3ebec8f640ec7aa187d6eb7b835b19 into 45f6801f484745f49b5d040f3bfb5768(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:40,936 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:40,936 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/C, priority=13, startTime=1732148980694; duration=0sec 2024-11-21T00:29:40,937 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:40,937 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:C 2024-11-21T00:29:40,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149040948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149040954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149040958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149040964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:40,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:40,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149040975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149041159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149041163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149041168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,175 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#A#compaction#539 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:41,175 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/e041f3b0cc2a425ab354974b18631cf1 is 175, key is test_row_0/A:col10/1732148979566/Put/seqid=0 2024-11-21T00:29:41,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149041171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149041181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742463_1639 (size=31515) 2024-11-21T00:29:41,215 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/e041f3b0cc2a425ab354974b18631cf1 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e041f3b0cc2a425ab354974b18631cf1 2024-11-21T00:29:41,228 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/A of dd3ebec8f640ec7aa187d6eb7b835b19 into e041f3b0cc2a425ab354974b18631cf1(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:41,228 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:41,228 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/A, priority=13, startTime=1732148980694; duration=0sec 2024-11-21T00:29:41,228 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:41,228 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:A 2024-11-21T00:29:41,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-21T00:29:41,320 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=203, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/09638a375a884938b91f8aec7ed5edd1 2024-11-21T00:29:41,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/5169f4d5a5794991bdfbe4c3ba7f2b6d is 50, key is test_row_0/B:col10/1732148979628/Put/seqid=0 2024-11-21T00:29:41,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742464_1640 (size=12151) 2024-11-21T00:29:41,371 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/5169f4d5a5794991bdfbe4c3ba7f2b6d 2024-11-21T00:29:41,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/945d9a9d5874432ca741e4a221a4a970 is 50, key is test_row_0/C:col10/1732148979628/Put/seqid=0 2024-11-21T00:29:41,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742465_1641 (size=12151) 2024-11-21T00:29:41,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149041463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149041466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149041472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149041483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149041478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,821 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/945d9a9d5874432ca741e4a221a4a970 2024-11-21T00:29:41,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/09638a375a884938b91f8aec7ed5edd1 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/09638a375a884938b91f8aec7ed5edd1 2024-11-21T00:29:41,848 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/09638a375a884938b91f8aec7ed5edd1, entries=150, sequenceid=203, filesize=30.4 K 2024-11-21T00:29:41,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/5169f4d5a5794991bdfbe4c3ba7f2b6d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5169f4d5a5794991bdfbe4c3ba7f2b6d 2024-11-21T00:29:41,853 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5169f4d5a5794991bdfbe4c3ba7f2b6d, entries=150, sequenceid=203, filesize=11.9 K 2024-11-21T00:29:41,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/945d9a9d5874432ca741e4a221a4a970 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/945d9a9d5874432ca741e4a221a4a970 2024-11-21T00:29:41,859 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/945d9a9d5874432ca741e4a221a4a970, entries=150, sequenceid=203, filesize=11.9 K 2024-11-21T00:29:41,860 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for dd3ebec8f640ec7aa187d6eb7b835b19 in 1089ms, sequenceid=203, compaction requested=false 2024-11-21T00:29:41,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:41,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:41,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-21T00:29:41,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-21T00:29:41,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-21T00:29:41,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7250 sec 2024-11-21T00:29:41,865 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.7320 sec 2024-11-21T00:29:41,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:41,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-21T00:29:41,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:41,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:41,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:41,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:41,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:41,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:41,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411215901e1339300440383f0841b0eee311e_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148980823/Put/seqid=0 2024-11-21T00:29:41,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742466_1642 (size=12304) 2024-11-21T00:29:41,993 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:41,995 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411215901e1339300440383f0841b0eee311e_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411215901e1339300440383f0841b0eee311e_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:41,995 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/e30904b3d98c4d2dbb825964ddc50614, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:41,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/e30904b3d98c4d2dbb825964ddc50614 is 175, key is test_row_0/A:col10/1732148980823/Put/seqid=0 2024-11-21T00:29:41,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149041995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149041996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149041996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:41,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149041997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149041998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742467_1643 (size=31105) 2024-11-21T00:29:42,010 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=221, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/e30904b3d98c4d2dbb825964ddc50614 2024-11-21T00:29:42,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/3c364b4417544b19906c962fefb336f8 is 50, key is test_row_0/B:col10/1732148980823/Put/seqid=0 2024-11-21T00:29:42,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149042103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149042103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149042103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149042103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742468_1644 (size=12151) 2024-11-21T00:29:42,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-21T00:29:42,252 INFO [Thread-2589 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-21T00:29:42,264 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:42,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-21T00:29:42,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-21T00:29:42,266 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:42,266 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:42,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:42,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149042305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149042307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149042307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149042311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-21T00:29:42,419 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:42,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:42,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:42,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:42,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:42,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:42,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:42,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/3c364b4417544b19906c962fefb336f8 2024-11-21T00:29:42,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/9f87a2492d4f49a39dec927cb6a3d7ff is 50, key is test_row_0/C:col10/1732148980823/Put/seqid=0 2024-11-21T00:29:42,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742469_1645 (size=12151) 2024-11-21T00:29:42,556 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/9f87a2492d4f49a39dec927cb6a3d7ff 2024-11-21T00:29:42,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/e30904b3d98c4d2dbb825964ddc50614 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e30904b3d98c4d2dbb825964ddc50614 2024-11-21T00:29:42,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e30904b3d98c4d2dbb825964ddc50614, entries=150, sequenceid=221, filesize=30.4 K 2024-11-21T00:29:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/3c364b4417544b19906c962fefb336f8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c364b4417544b19906c962fefb336f8 2024-11-21T00:29:42,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c364b4417544b19906c962fefb336f8, entries=150, sequenceid=221, filesize=11.9 K 2024-11-21T00:29:42,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/9f87a2492d4f49a39dec927cb6a3d7ff as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/9f87a2492d4f49a39dec927cb6a3d7ff 2024-11-21T00:29:42,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-21T00:29:42,572 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:42,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:42,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:42,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:42,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:42,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:42,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:42,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/9f87a2492d4f49a39dec927cb6a3d7ff, entries=150, sequenceid=221, filesize=11.9 K 2024-11-21T00:29:42,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for dd3ebec8f640ec7aa187d6eb7b835b19 in 604ms, sequenceid=221, compaction requested=true 2024-11-21T00:29:42,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:42,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:42,579 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:42,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:42,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:42,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:42,579 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:42,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:42,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:42,580 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:42,580 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:42,580 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/A is initiating minor compaction (all files) 2024-11-21T00:29:42,580 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/B is initiating minor compaction (all files) 2024-11-21T00:29:42,580 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/A in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:42,580 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/B in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:42,580 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e041f3b0cc2a425ab354974b18631cf1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/09638a375a884938b91f8aec7ed5edd1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e30904b3d98c4d2dbb825964ddc50614] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=91.5 K 2024-11-21T00:29:42,580 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/e84f647806e74f98b20189f7e304465b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5169f4d5a5794991bdfbe4c3ba7f2b6d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c364b4417544b19906c962fefb336f8] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=36.0 K 2024-11-21T00:29:42,580 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:42,580 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e041f3b0cc2a425ab354974b18631cf1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/09638a375a884938b91f8aec7ed5edd1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e30904b3d98c4d2dbb825964ddc50614] 2024-11-21T00:29:42,581 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting e84f647806e74f98b20189f7e304465b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148978426 2024-11-21T00:29:42,581 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting e041f3b0cc2a425ab354974b18631cf1, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148978426 2024-11-21T00:29:42,581 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5169f4d5a5794991bdfbe4c3ba7f2b6d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732148979628 2024-11-21T00:29:42,581 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09638a375a884938b91f8aec7ed5edd1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732148979628 2024-11-21T00:29:42,581 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting e30904b3d98c4d2dbb825964ddc50614, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148980823 2024-11-21T00:29:42,581 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c364b4417544b19906c962fefb336f8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148980823 2024-11-21T00:29:42,590 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:42,591 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#B#compaction#548 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:42,591 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/f308d5f2e6a24c7db5d027ec3245e26c is 50, key is test_row_0/B:col10/1732148980823/Put/seqid=0 2024-11-21T00:29:42,593 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121bde59c26acad4a97b974f97515b3ed41_dd3ebec8f640ec7aa187d6eb7b835b19 store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:42,595 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121bde59c26acad4a97b974f97515b3ed41_dd3ebec8f640ec7aa187d6eb7b835b19, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:42,595 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121bde59c26acad4a97b974f97515b3ed41_dd3ebec8f640ec7aa187d6eb7b835b19 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:42,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742470_1646 (size=12663) 2024-11-21T00:29:42,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:42,612 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-21T00:29:42,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:42,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:42,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:42,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:42,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:42,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:42,626 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/f308d5f2e6a24c7db5d027ec3245e26c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f308d5f2e6a24c7db5d027ec3245e26c 2024-11-21T00:29:42,627 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112158ed329f8fc0493f802963cb92e81355_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148982610/Put/seqid=0 2024-11-21T00:29:42,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742471_1647 (size=4469) 2024-11-21T00:29:42,629 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#A#compaction#549 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:42,630 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/92f680fe9a86492b911f7558dff82828 is 175, key is test_row_0/A:col10/1732148980823/Put/seqid=0 2024-11-21T00:29:42,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742472_1648 (size=14794) 2024-11-21T00:29:42,632 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:42,634 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/B of dd3ebec8f640ec7aa187d6eb7b835b19 into f308d5f2e6a24c7db5d027ec3245e26c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:42,634 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:42,634 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/B, priority=13, startTime=1732148982579; duration=0sec 2024-11-21T00:29:42,634 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:42,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,634 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:B 2024-11-21T00:29:42,634 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:42,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149042628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,636 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:42,636 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/C is initiating minor compaction (all files) 2024-11-21T00:29:42,636 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/C in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:42,636 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/45f6801f484745f49b5d040f3bfb5768, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/945d9a9d5874432ca741e4a221a4a970, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/9f87a2492d4f49a39dec927cb6a3d7ff] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=36.0 K 2024-11-21T00:29:42,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742473_1649 (size=31617) 2024-11-21T00:29:42,636 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 45f6801f484745f49b5d040f3bfb5768, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732148978426 2024-11-21T00:29:42,637 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 945d9a9d5874432ca741e4a221a4a970, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732148979628 2024-11-21T00:29:42,637 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f87a2492d4f49a39dec927cb6a3d7ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148980823 2024-11-21T00:29:42,639 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112158ed329f8fc0493f802963cb92e81355_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112158ed329f8fc0493f802963cb92e81355_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:42,640 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/0703ce62599a45d2be11b1739d2467d8, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:42,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/0703ce62599a45d2be11b1739d2467d8 is 175, key is test_row_0/A:col10/1732148982610/Put/seqid=0 2024-11-21T00:29:42,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149042634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149042635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149042635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742474_1650 (size=39749) 2024-11-21T00:29:42,646 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#C#compaction#551 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:42,646 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/ca7d7bb0ee334c2cb05425706d4ab23e is 50, key is test_row_0/C:col10/1732148980823/Put/seqid=0 2024-11-21T00:29:42,651 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=243, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/0703ce62599a45d2be11b1739d2467d8 2024-11-21T00:29:42,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/2e50d77664c643a7ba5043d9402c022b is 50, key is test_row_0/B:col10/1732148982610/Put/seqid=0 2024-11-21T00:29:42,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742475_1651 (size=12663) 2024-11-21T00:29:42,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742476_1652 (size=12151) 2024-11-21T00:29:42,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/2e50d77664c643a7ba5043d9402c022b 2024-11-21T00:29:42,729 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:42,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:42,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:42,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:42,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:42,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:42,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:42,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149042736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/a11fabf469c644d5a9a1ee772e50f026 is 50, key is test_row_0/C:col10/1732148982610/Put/seqid=0 2024-11-21T00:29:42,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149042743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149042744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149042750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742477_1653 (size=12151) 2024-11-21T00:29:42,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-21T00:29:42,881 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:42,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:42,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:42,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:42,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:42,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:42,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149042941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149042948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149042948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:42,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:42,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149042954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149043006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,034 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:43,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:43,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,035 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:43,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,040 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/92f680fe9a86492b911f7558dff82828 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/92f680fe9a86492b911f7558dff82828 2024-11-21T00:29:43,043 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/A of dd3ebec8f640ec7aa187d6eb7b835b19 into 92f680fe9a86492b911f7558dff82828(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:43,043 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:43,043 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/A, priority=13, startTime=1732148982579; duration=0sec 2024-11-21T00:29:43,043 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:43,043 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:A 2024-11-21T00:29:43,113 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/ca7d7bb0ee334c2cb05425706d4ab23e as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/ca7d7bb0ee334c2cb05425706d4ab23e 2024-11-21T00:29:43,121 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/C of dd3ebec8f640ec7aa187d6eb7b835b19 into ca7d7bb0ee334c2cb05425706d4ab23e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:43,122 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:43,122 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/C, priority=13, startTime=1732148982579; duration=0sec 2024-11-21T00:29:43,122 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:43,122 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:C 2024-11-21T00:29:43,186 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:43,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:43,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/a11fabf469c644d5a9a1ee772e50f026 2024-11-21T00:29:43,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/0703ce62599a45d2be11b1739d2467d8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/0703ce62599a45d2be11b1739d2467d8 2024-11-21T00:29:43,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/0703ce62599a45d2be11b1739d2467d8, entries=200, sequenceid=243, filesize=38.8 K 2024-11-21T00:29:43,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/2e50d77664c643a7ba5043d9402c022b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2e50d77664c643a7ba5043d9402c022b 2024-11-21T00:29:43,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2e50d77664c643a7ba5043d9402c022b, entries=150, sequenceid=243, filesize=11.9 K 2024-11-21T00:29:43,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/a11fabf469c644d5a9a1ee772e50f026 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/a11fabf469c644d5a9a1ee772e50f026 2024-11-21T00:29:43,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/a11fabf469c644d5a9a1ee772e50f026, entries=150, sequenceid=243, filesize=11.9 K 2024-11-21T00:29:43,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for dd3ebec8f640ec7aa187d6eb7b835b19 in 605ms, sequenceid=243, compaction requested=false 2024-11-21T00:29:43,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:43,247 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:43,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-21T00:29:43,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:43,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:43,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:43,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:43,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:43,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:43,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112135867b3ae4494636ab8a68dd7caa1b49_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148982622/Put/seqid=0 2024-11-21T00:29:43,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742478_1654 (size=14894) 2024-11-21T00:29:43,258 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:43,260 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112135867b3ae4494636ab8a68dd7caa1b49_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112135867b3ae4494636ab8a68dd7caa1b49_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:43,263 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/1d868957572a415a941da92c1ef2c458, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:43,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/1d868957572a415a941da92c1ef2c458 is 175, key is test_row_0/A:col10/1732148982622/Put/seqid=0 2024-11-21T00:29:43,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742479_1655 (size=39849) 2024-11-21T00:29:43,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149043270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149043307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149043308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149043308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,345 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:43,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:43,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,345 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-21T00:29:43,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149043408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149043412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149043412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149043415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,498 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:43,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:43,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149043611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149043616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149043619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149043627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,658 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:43,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:43,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,663 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,675 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=262, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/1d868957572a415a941da92c1ef2c458 2024-11-21T00:29:43,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/4c3f9426c8934814adae8b30bef31d5d is 50, key is test_row_0/B:col10/1732148982622/Put/seqid=0 2024-11-21T00:29:43,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742480_1656 (size=12201) 2024-11-21T00:29:43,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/4c3f9426c8934814adae8b30bef31d5d 2024-11-21T00:29:43,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c5c6fabc059a4bd79b9f243d61523008 is 50, key is test_row_0/C:col10/1732148982622/Put/seqid=0 2024-11-21T00:29:43,827 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:43,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:43,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:43,831 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742481_1657 (size=12201) 2024-11-21T00:29:43,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149043919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149043927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149043928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:43,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149043936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,983 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:43,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:43,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:43,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:43,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:43,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:43,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,135 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:44,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:44,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:44,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c5c6fabc059a4bd79b9f243d61523008 2024-11-21T00:29:44,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/1d868957572a415a941da92c1ef2c458 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/1d868957572a415a941da92c1ef2c458 2024-11-21T00:29:44,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/1d868957572a415a941da92c1ef2c458, entries=200, sequenceid=262, filesize=38.9 K 2024-11-21T00:29:44,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/4c3f9426c8934814adae8b30bef31d5d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4c3f9426c8934814adae8b30bef31d5d 2024-11-21T00:29:44,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4c3f9426c8934814adae8b30bef31d5d, entries=150, sequenceid=262, filesize=11.9 K 2024-11-21T00:29:44,293 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:44,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:44,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:44,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c5c6fabc059a4bd79b9f243d61523008 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c5c6fabc059a4bd79b9f243d61523008 2024-11-21T00:29:44,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:44,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c5c6fabc059a4bd79b9f243d61523008, entries=150, sequenceid=262, filesize=11.9 K 2024-11-21T00:29:44,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=127.47 KB/130530 for dd3ebec8f640ec7aa187d6eb7b835b19 in 1051ms, sequenceid=262, compaction requested=true 2024-11-21T00:29:44,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:44,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:44,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:44,298 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:44,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:44,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:44,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:44,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:29:44,298 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:44,299 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111215 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:44,299 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/A is initiating minor compaction (all files) 2024-11-21T00:29:44,299 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/A in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:44,299 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/92f680fe9a86492b911f7558dff82828, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/0703ce62599a45d2be11b1739d2467d8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/1d868957572a415a941da92c1ef2c458] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=108.6 K 2024-11-21T00:29:44,299 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,299 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/92f680fe9a86492b911f7558dff82828, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/0703ce62599a45d2be11b1739d2467d8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/1d868957572a415a941da92c1ef2c458] 2024-11-21T00:29:44,299 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92f680fe9a86492b911f7558dff82828, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148980823 2024-11-21T00:29:44,300 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0703ce62599a45d2be11b1739d2467d8, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732148981994 2024-11-21T00:29:44,300 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d868957572a415a941da92c1ef2c458, keycount=200, bloomtype=ROW, size=38.9 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732148982622 2024-11-21T00:29:44,303 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:44,303 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/B is initiating minor compaction (all files) 2024-11-21T00:29:44,303 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/B in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:44,303 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f308d5f2e6a24c7db5d027ec3245e26c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2e50d77664c643a7ba5043d9402c022b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4c3f9426c8934814adae8b30bef31d5d] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=36.1 K 2024-11-21T00:29:44,304 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting f308d5f2e6a24c7db5d027ec3245e26c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148980823 2024-11-21T00:29:44,304 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e50d77664c643a7ba5043d9402c022b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732148981994 2024-11-21T00:29:44,304 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c3f9426c8934814adae8b30bef31d5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732148982622 2024-11-21T00:29:44,307 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:44,311 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121136db21767a64ba8b77af9acf8a57f85_dd3ebec8f640ec7aa187d6eb7b835b19 store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:44,313 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121136db21767a64ba8b77af9acf8a57f85_dd3ebec8f640ec7aa187d6eb7b835b19, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:44,313 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121136db21767a64ba8b77af9acf8a57f85_dd3ebec8f640ec7aa187d6eb7b835b19 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:44,314 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#B#compaction#558 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:44,314 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/80fc00ce2db34a98a2f1e0469689bbac is 50, key is test_row_0/B:col10/1732148982622/Put/seqid=0 2024-11-21T00:29:44,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742483_1659 (size=12815) 2024-11-21T00:29:44,339 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/80fc00ce2db34a98a2f1e0469689bbac as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/80fc00ce2db34a98a2f1e0469689bbac 2024-11-21T00:29:44,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742482_1658 (size=4469) 2024-11-21T00:29:44,344 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/B of dd3ebec8f640ec7aa187d6eb7b835b19 into 80fc00ce2db34a98a2f1e0469689bbac(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:44,344 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:44,344 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/B, priority=13, startTime=1732148984298; duration=0sec 2024-11-21T00:29:44,344 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:44,344 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:B 2024-11-21T00:29:44,344 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:44,345 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#A#compaction#557 average throughput is 0.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:44,345 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:44,345 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/9cfc72c0a475409b8074125bfb58b384 is 175, key is test_row_0/A:col10/1732148982622/Put/seqid=0 2024-11-21T00:29:44,345 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/C is initiating minor compaction (all files) 2024-11-21T00:29:44,345 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/C in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,345 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/ca7d7bb0ee334c2cb05425706d4ab23e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/a11fabf469c644d5a9a1ee772e50f026, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c5c6fabc059a4bd79b9f243d61523008] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=36.1 K 2024-11-21T00:29:44,346 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting ca7d7bb0ee334c2cb05425706d4ab23e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732148980823 2024-11-21T00:29:44,346 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting a11fabf469c644d5a9a1ee772e50f026, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732148981994 2024-11-21T00:29:44,347 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c5c6fabc059a4bd79b9f243d61523008, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732148982622 2024-11-21T00:29:44,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742484_1660 (size=31769) 2024-11-21T00:29:44,356 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#C#compaction#559 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:44,356 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/7f155040de1b4748b2ac98e0d42a4d74 is 50, key is test_row_0/C:col10/1732148982622/Put/seqid=0 2024-11-21T00:29:44,360 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/9cfc72c0a475409b8074125bfb58b384 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/9cfc72c0a475409b8074125bfb58b384 2024-11-21T00:29:44,366 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/A of dd3ebec8f640ec7aa187d6eb7b835b19 into 9cfc72c0a475409b8074125bfb58b384(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:44,366 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:44,366 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/A, priority=13, startTime=1732148984298; duration=0sec 2024-11-21T00:29:44,366 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:44,366 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:A 2024-11-21T00:29:44,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742485_1661 (size=12815) 2024-11-21T00:29:44,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-21T00:29:44,379 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/7f155040de1b4748b2ac98e0d42a4d74 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7f155040de1b4748b2ac98e0d42a4d74 2024-11-21T00:29:44,385 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/C of dd3ebec8f640ec7aa187d6eb7b835b19 into 7f155040de1b4748b2ac98e0d42a4d74(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:44,385 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:44,385 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/C, priority=13, startTime=1732148984298; duration=0sec 2024-11-21T00:29:44,385 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:44,385 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:C 2024-11-21T00:29:44,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:44,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-21T00:29:44,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:44,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:44,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:44,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:44,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:44,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:44,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121df3415c780e1437481a925bb71f4eaf7_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148983283/Put/seqid=0 2024-11-21T00:29:44,446 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742486_1662 (size=14994) 2024-11-21T00:29:44,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:44,449 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:44,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:44,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:44,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:44,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:44,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149044448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:44,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149044449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:44,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149044451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:44,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149044451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,463 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121df3415c780e1437481a925bb71f4eaf7_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121df3415c780e1437481a925bb71f4eaf7_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:44,464 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/a6bf789230eb453aafe809420d9b59a5, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:44,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/a6bf789230eb453aafe809420d9b59a5 is 175, key is test_row_0/A:col10/1732148983283/Put/seqid=0 2024-11-21T00:29:44,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742487_1663 (size=39949) 2024-11-21T00:29:44,497 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=288, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/a6bf789230eb453aafe809420d9b59a5 2024-11-21T00:29:44,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/5cd1b5cf56284ab8822b484add208218 is 50, key is test_row_0/B:col10/1732148983283/Put/seqid=0 2024-11-21T00:29:44,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:44,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149044555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:44,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149044555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:44,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149044555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742488_1664 (size=12301) 2024-11-21T00:29:44,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/5cd1b5cf56284ab8822b484add208218 2024-11-21T00:29:44,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/4936bc7ecbc04e248a0b0469555c607a is 50, key is test_row_0/C:col10/1732148983283/Put/seqid=0 2024-11-21T00:29:44,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742489_1665 (size=12301) 2024-11-21T00:29:44,601 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:44,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:44,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:44,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,754 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:44,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:44,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,755 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:44,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149044757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:44,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149044758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:44,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149044759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,907 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:44,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:44,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:44,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:44,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:44,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/4936bc7ecbc04e248a0b0469555c607a 2024-11-21T00:29:44,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/a6bf789230eb453aafe809420d9b59a5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/a6bf789230eb453aafe809420d9b59a5 2024-11-21T00:29:45,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/a6bf789230eb453aafe809420d9b59a5, entries=200, sequenceid=288, filesize=39.0 K 2024-11-21T00:29:45,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/5cd1b5cf56284ab8822b484add208218 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5cd1b5cf56284ab8822b484add208218 2024-11-21T00:29:45,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5cd1b5cf56284ab8822b484add208218, entries=150, sequenceid=288, filesize=12.0 K 2024-11-21T00:29:45,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/4936bc7ecbc04e248a0b0469555c607a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4936bc7ecbc04e248a0b0469555c607a 2024-11-21T00:29:45,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4936bc7ecbc04e248a0b0469555c607a, entries=150, sequenceid=288, filesize=12.0 K 2024-11-21T00:29:45,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for dd3ebec8f640ec7aa187d6eb7b835b19 in 595ms, sequenceid=288, compaction requested=false 2024-11-21T00:29:45,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:45,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:45,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-21T00:29:45,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:45,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:45,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:45,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:45,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:45,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:45,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112102ecc70b234446b5be4dc5efc433ab50_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148984448/Put/seqid=0 2024-11-21T00:29:45,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742490_1666 (size=12454) 2024-11-21T00:29:45,063 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:45,068 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:45,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:45,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:45,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:45,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,072 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112102ecc70b234446b5be4dc5efc433ab50_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112102ecc70b234446b5be4dc5efc433ab50_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:45,073 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/3c8733706a9d454396d42789e76c4f74, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:45,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/3c8733706a9d454396d42789e76c4f74 is 175, key is test_row_0/A:col10/1732148984448/Put/seqid=0 2024-11-21T00:29:45,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149045077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149045080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149045080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149045082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742491_1667 (size=31255) 2024-11-21T00:29:45,123 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=303, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/3c8733706a9d454396d42789e76c4f74 2024-11-21T00:29:45,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/113a81f1ab074513bea7549edf773d8d is 50, key is test_row_0/B:col10/1732148984448/Put/seqid=0 2024-11-21T00:29:45,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742492_1668 (size=12301) 2024-11-21T00:29:45,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/113a81f1ab074513bea7549edf773d8d 2024-11-21T00:29:45,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/20ff658f24364fa28fa7fdac5447d761 is 50, key is test_row_0/C:col10/1732148984448/Put/seqid=0 2024-11-21T00:29:45,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742493_1669 (size=12301) 2024-11-21T00:29:45,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149045182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149045183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149045183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149045186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,223 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:45,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:45,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:45,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:45,223 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
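The entries above show the pattern driving this stretch of the test: writes keep arriving while the memstore of region dd3ebec8f640ec7aa187d6eb7b835b19 is above its blocking limit (512.0 K here), so each Mutate RPC is rejected with RegionTooBusyException, while the master's flush procedure (pid=170) keeps failing with "Unable to complete flush" because a flush is already in progress. A minimal client-side sketch of absorbing such rejections with bounded retries is below. It uses only stock HBase client APIs; the table name, family, row key and column mirror the log, whereas the class name, retry count and backoff values are illustrative assumptions, not taken from this test.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {

  // True if the failure is, or was caused by, RegionTooBusyException.
  static boolean isRegionTooBusy(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }

  // Application-level backstop: retry a Put a bounded number of times when the
  // region rejects writes because its memstore is over the blocking limit,
  // sleeping between attempts so the in-flight flush can make progress.
  static void putWithBackoff(Table table, Put put, int maxAttempts, long baseBackoffMs)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        if (!isRegionTooBusy(e) || attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(baseBackoffMs * attempt); // simple linear backoff
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5, 200L);
    }
  }
}

Note that the stock HBase client already retries most retriable exceptions internally, so a loop like this mainly matters when those built-in retries are disabled or exhausted.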
2024-11-21T00:29:45,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,375 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:45,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:45,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:45,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:45,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149045386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149045386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149045386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149045388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149045455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,527 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:45,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:45,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:45,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:45,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:45,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/20ff658f24364fa28fa7fdac5447d761 2024-11-21T00:29:45,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/3c8733706a9d454396d42789e76c4f74 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/3c8733706a9d454396d42789e76c4f74 2024-11-21T00:29:45,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/3c8733706a9d454396d42789e76c4f74, entries=150, sequenceid=303, filesize=30.5 K 2024-11-21T00:29:45,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/113a81f1ab074513bea7549edf773d8d as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/113a81f1ab074513bea7549edf773d8d 2024-11-21T00:29:45,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/113a81f1ab074513bea7549edf773d8d, entries=150, 
sequenceid=303, filesize=12.0 K 2024-11-21T00:29:45,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/20ff658f24364fa28fa7fdac5447d761 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/20ff658f24364fa28fa7fdac5447d761 2024-11-21T00:29:45,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/20ff658f24364fa28fa7fdac5447d761, entries=150, sequenceid=303, filesize=12.0 K 2024-11-21T00:29:45,596 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for dd3ebec8f640ec7aa187d6eb7b835b19 in 567ms, sequenceid=303, compaction requested=true 2024-11-21T00:29:45,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:45,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:45,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:45,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:45,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-21T00:29:45,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:45,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-21T00:29:45,596 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:45,598 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:45,607 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102973 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:45,607 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/A is initiating minor compaction (all files) 2024-11-21T00:29:45,607 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/A in 
TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:45,607 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/9cfc72c0a475409b8074125bfb58b384, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/a6bf789230eb453aafe809420d9b59a5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/3c8733706a9d454396d42789e76c4f74] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=100.6 K 2024-11-21T00:29:45,607 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:45,607 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/9cfc72c0a475409b8074125bfb58b384, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/a6bf789230eb453aafe809420d9b59a5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/3c8733706a9d454396d42789e76c4f74] 2024-11-21T00:29:45,607 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37417 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:45,608 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/C is initiating minor compaction (all files) 2024-11-21T00:29:45,608 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/C in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
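The compaction entries above show the region server picking all three A-family files (totalSize=100.6 K) for a minor compaction once ExploringCompactionPolicy finds an eligible set after the flush. Compactions can also be requested explicitly through the public Admin API; a minimal sketch is below. The table and family names come from this log, the class name is illustrative, and the server-side policy still decides which files are actually compacted and when.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask for a compaction of one column family; this only queues the request,
      // the compaction policy on the region server selects the files.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
    }
  }
}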
2024-11-21T00:29:45,608 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7f155040de1b4748b2ac98e0d42a4d74, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4936bc7ecbc04e248a0b0469555c607a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/20ff658f24364fa28fa7fdac5447d761] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=36.5 K 2024-11-21T00:29:45,611 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f155040de1b4748b2ac98e0d42a4d74, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732148982622 2024-11-21T00:29:45,611 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cfc72c0a475409b8074125bfb58b384, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732148982622 2024-11-21T00:29:45,612 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 4936bc7ecbc04e248a0b0469555c607a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148983269 2024-11-21T00:29:45,612 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6bf789230eb453aafe809420d9b59a5, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148983269 2024-11-21T00:29:45,613 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 20ff658f24364fa28fa7fdac5447d761, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732148984432 2024-11-21T00:29:45,613 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c8733706a9d454396d42789e76c4f74, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732148984432 2024-11-21T00:29:45,624 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#C#compaction#566 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:45,625 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c1c76b2d4b8a4a6f8e9bf666b2caff62 is 50, key is test_row_0/C:col10/1732148984448/Put/seqid=0 2024-11-21T00:29:45,626 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:45,645 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121314dc448e011447e9093962f5742de1b_dd3ebec8f640ec7aa187d6eb7b835b19 store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:45,647 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121314dc448e011447e9093962f5742de1b_dd3ebec8f640ec7aa187d6eb7b835b19, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:45,648 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121314dc448e011447e9093962f5742de1b_dd3ebec8f640ec7aa187d6eb7b835b19 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:45,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742494_1670 (size=13017) 2024-11-21T00:29:45,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742495_1671 (size=4469) 2024-11-21T00:29:45,686 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-21T00:29:45,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
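The 512.0 K figure quoted in every RegionTooBusyException above is the region's blocking memstore size, evidently configured far below the shipped defaults so that this test exercises blocked writes, flushes and compactions quickly. In stock HBase that cap is derived from the region's flush size (hbase.hregion.memstore.flush.size, or a per-table override) multiplied by hbase.hregion.memstore.block.multiplier. The sketch below only reads those two settings and prints the resulting cap, falling back to the shipped defaults when they are not overridden; the class name and printed message are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Defaults: 128 MB flush size, block multiplier of 4.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    // Writes to a region are rejected with RegionTooBusyException once its memstore
    // grows past roughly flushSize * blockMultiplier while flushes are still catching up.
    System.out.println("approx. blocking memstore size = " + (flushSize * blockMultiplier) + " bytes");
  }
}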
2024-11-21T00:29:45,688 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-21T00:29:45,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:45,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:45,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:45,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:45,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:45,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:45,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:45,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:45,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112128f16192dabc42efac85d44ae739e723_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148985074/Put/seqid=0 2024-11-21T00:29:45,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149045727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149045727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149045727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149045729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742496_1672 (size=12454) 2024-11-21T00:29:45,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149045832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149045833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149045833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:45,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149045833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149046036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149046039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149046043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149046047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,075 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#A#compaction#567 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second
2024-11-21T00:29:46,075 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5e0a5e94ff4e477189a3a5e0c334f7d1 is 175, key is test_row_0/A:col10/1732148984448/Put/seqid=0
2024-11-21T00:29:46,079 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/c1c76b2d4b8a4a6f8e9bf666b2caff62 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c1c76b2d4b8a4a6f8e9bf666b2caff62
2024-11-21T00:29:46,083 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/C of dd3ebec8f640ec7aa187d6eb7b835b19 into c1c76b2d4b8a4a6f8e9bf666b2caff62(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-21T00:29:46,083 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19:
2024-11-21T00:29:46,083 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/C, priority=13, startTime=1732148985596; duration=0sec
2024-11-21T00:29:46,083 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-21T00:29:46,083 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:C
2024-11-21T00:29:46,083 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-21T00:29:46,084 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37417 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-21T00:29:46,084 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/B is initiating minor compaction (all files)
2024-11-21T00:29:46,085 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/B in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.
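The ExploringCompactionPolicy entry above reports that it examined 1 permutation of the 3 eligible store files and selected all of them (37417 bytes total) because every file in the window passed the size-ratio test. As a rough illustration of that selection rule only (this is not the HBase implementation; the class name, method names, and the sizes in main are invented for the example), the check can be sketched in Java as:

```java
import java.util.ArrayList;
import java.util.List;

// Illustrative-only sketch of an "exploring" style selection: scan contiguous
// windows of store-file sizes and keep the best window whose files are all
// within a size ratio of each other. Not the actual ExploringCompactionPolicy code.
public class ExploringSelectionSketch {

    // True if every file is no larger than ratio * (sum of the other files in the window).
    static boolean withinRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    // Prefer the window with the most files; break ties by smallest total size.
    static List<Long> selectBestWindow(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(fileSizes.size(), start + maxFiles); end++) {
                List<Long> window = fileSizes.subList(start, end);
                if (!withinRatio(window, ratio)) {
                    continue;
                }
                long total = window.stream().mapToLong(Long::longValue).sum();
                if (window.size() > best.size() || (window.size() == best.size() && total < bestTotal)) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Three files of roughly the sizes seen in the log (bytes, approximate), ratio 1.2.
        List<Long> sizes = List.of(12_800L, 12_300L, 12_317L);
        System.out.println(selectBestWindow(sizes, 3, 10, 1.2));
    }
}
```

With a ratio of 1.2 and three files of nearly equal size, the only window that satisfies both the minimum file count and the ratio test is the full set, which is consistent with the single permutation reported in the log line above.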
2024-11-21T00:29:46,085 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/80fc00ce2db34a98a2f1e0469689bbac, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5cd1b5cf56284ab8822b484add208218, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/113a81f1ab074513bea7549edf773d8d] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=36.5 K
2024-11-21T00:29:46,085 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 80fc00ce2db34a98a2f1e0469689bbac, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732148982622
2024-11-21T00:29:46,085 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 5cd1b5cf56284ab8822b484add208218, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732148983269
2024-11-21T00:29:46,085 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 113a81f1ab074513bea7549edf773d8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732148984432
2024-11-21T00:29:46,114 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#B#compaction#569 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-21T00:29:46,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742497_1673 (size=31971)
2024-11-21T00:29:46,115 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/3c8bbccd0407434dac84aaeaf19c5c95 is 50, key is test_row_0/B:col10/1732148984448/Put/seqid=0
2024-11-21T00:29:46,125 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5e0a5e94ff4e477189a3a5e0c334f7d1 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5e0a5e94ff4e477189a3a5e0c334f7d1
2024-11-21T00:29:46,139 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/A of dd3ebec8f640ec7aa187d6eb7b835b19 into 5e0a5e94ff4e477189a3a5e0c334f7d1(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute.
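The compactions above were scheduled internally by the region server after the preceding flushes; the same work can also be requested from a client through the Admin API. A minimal sketch, assuming an HBase 2.x client on the classpath and the table name taken from this log (connection settings come from whatever hbase-site.xml the Configuration picks up):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionAdminSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestAcidGuarantees"); // table name from the log above
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(table);    // ask for a flush, similar to the FlushTableProcedure in this log
            admin.compact(table);  // request a (minor) compaction of the table's regions
            // Poll the aggregate compaction state reported by the region servers.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(500);
            }
        }
    }
}
```

getCompactionState only reflects what the region servers currently report, so polling it is approximate; it is shown here only to illustrate how a client can observe the kind of CompactSplit activity logged above.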
2024-11-21T00:29:46,139 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:46,139 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/A, priority=13, startTime=1732148985596; duration=0sec 2024-11-21T00:29:46,139 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:46,139 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:A 2024-11-21T00:29:46,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:46,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742498_1674 (size=13017) 2024-11-21T00:29:46,160 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112128f16192dabc42efac85d44ae739e723_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112128f16192dabc42efac85d44ae739e723_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:46,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/dcf89d0a2830464ea9c6628109e5501c, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:46,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/dcf89d0a2830464ea9c6628109e5501c is 175, key is test_row_0/A:col10/1732148985074/Put/seqid=0 2024-11-21T00:29:46,167 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/3c8bbccd0407434dac84aaeaf19c5c95 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c8bbccd0407434dac84aaeaf19c5c95 2024-11-21T00:29:46,175 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/B of 
dd3ebec8f640ec7aa187d6eb7b835b19 into 3c8bbccd0407434dac84aaeaf19c5c95(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:46,175 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:46,175 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/B, priority=13, startTime=1732148985596; duration=0sec 2024-11-21T00:29:46,175 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:46,175 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:B 2024-11-21T00:29:46,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742499_1675 (size=31255) 2024-11-21T00:29:46,188 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=324, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/dcf89d0a2830464ea9c6628109e5501c 2024-11-21T00:29:46,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/8a7eb9cbb21f44a98fa66af438efa883 is 50, key is test_row_0/B:col10/1732148985074/Put/seqid=0 2024-11-21T00:29:46,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742500_1676 (size=12301) 2024-11-21T00:29:46,198 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/8a7eb9cbb21f44a98fa66af438efa883 2024-11-21T00:29:46,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/d7fa1d6cc520413b82ba9b0010dbae7b is 50, key is test_row_0/C:col10/1732148985074/Put/seqid=0 2024-11-21T00:29:46,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742501_1677 (size=12301) 2024-11-21T00:29:46,239 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=324 (bloomFilter=true), 
to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/d7fa1d6cc520413b82ba9b0010dbae7b 2024-11-21T00:29:46,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/dcf89d0a2830464ea9c6628109e5501c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/dcf89d0a2830464ea9c6628109e5501c 2024-11-21T00:29:46,247 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/dcf89d0a2830464ea9c6628109e5501c, entries=150, sequenceid=324, filesize=30.5 K 2024-11-21T00:29:46,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/8a7eb9cbb21f44a98fa66af438efa883 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/8a7eb9cbb21f44a98fa66af438efa883 2024-11-21T00:29:46,251 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/8a7eb9cbb21f44a98fa66af438efa883, entries=150, sequenceid=324, filesize=12.0 K 2024-11-21T00:29:46,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/d7fa1d6cc520413b82ba9b0010dbae7b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/d7fa1d6cc520413b82ba9b0010dbae7b 2024-11-21T00:29:46,259 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/d7fa1d6cc520413b82ba9b0010dbae7b, entries=150, sequenceid=324, filesize=12.0 K 2024-11-21T00:29:46,260 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for dd3ebec8f640ec7aa187d6eb7b835b19 in 572ms, sequenceid=324, compaction requested=false 2024-11-21T00:29:46,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 
dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:46,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:46,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-21T00:29:46,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-21T00:29:46,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-21T00:29:46,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.9960 sec 2024-11-21T00:29:46,265 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 4.0010 sec 2024-11-21T00:29:46,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:46,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-21T00:29:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:46,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121b007c7bafc834397be15e733cbfcf183_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148985717/Put/seqid=0 2024-11-21T00:29:46,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742502_1678 (size=12454) 2024-11-21T00:29:46,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149046358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149046359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149046360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149046360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-21T00:29:46,374 INFO [Thread-2589 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-21T00:29:46,374 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-21T00:29:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-21T00:29:46,375 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:46,376 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:46,376 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:46,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149046461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149046463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149046463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-21T00:29:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149046463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586
2024-11-21T00:29:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171
2024-11-21T00:29:46,526 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586
2024-11-21T00:29:46,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172
2024-11-21T00:29:46,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.
2024-11-21T00:29:46,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing
2024-11-21T00:29:46,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.
2024-11-21T00:29:46,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172
java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
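The pid=172 flush callable gives up because the region is already mid-flush, while the RpcServer handlers keep rejecting Mutate calls with RegionTooBusyException: the memstore is over its blocking limit (512.0 K in this test; outside a test setup the limit is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). Such rejections are transient and clear once the flush completes, so a writer typically backs off and retries. A minimal client-side sketch, assuming an HBase 2.x client; the row, family, and qualifier are taken from the log, and the retry count and backoff values are invented for the example:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                 // invented starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);               // the client also retries internally before surfacing an error
                    break;
                } catch (IOException e) {         // a busy region surfaces to the caller as an IOException
                    if (attempt == 5) {
                        throw e;                  // give up after the last attempt
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;               // simple exponential backoff
                }
            }
        }
    }
}
```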
2024-11-21T00:29:46,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:46,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149046665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149046666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149046667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149046667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,678 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-21T00:29:46,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-21T00:29:46,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:46,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:46,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:46,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:46,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:46,748 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:46,751 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121b007c7bafc834397be15e733cbfcf183_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b007c7bafc834397be15e733cbfcf183_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:46,752 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/64e698b5fc1348d395f8c28ab31761df, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:46,753 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/64e698b5fc1348d395f8c28ab31761df is 175, key is test_row_0/A:col10/1732148985717/Put/seqid=0 2024-11-21T00:29:46,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742503_1679 (size=31255) 2024-11-21T00:29:46,833 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-21T00:29:46,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:46,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:46,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:46,834 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:46,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:46,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:46,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149046969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149046971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149046972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-21T00:29:46,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:46,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149046978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,986 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:46,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-21T00:29:46,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:46,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:46,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:46,987 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:46,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,139 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:47,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-21T00:29:47,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:47,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,143 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:47,178 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=343, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/64e698b5fc1348d395f8c28ab31761df 2024-11-21T00:29:47,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/b050acbbf28f4f62ae2bb4a982a0e0d8 is 50, key is test_row_0/B:col10/1732148985717/Put/seqid=0 2024-11-21T00:29:47,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742504_1680 (size=12301) 2024-11-21T00:29:47,296 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:47,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-21T00:29:47,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:47,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:47,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,450 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:47,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-21T00:29:47,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:47,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:47,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149047466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:47,468 DEBUG [Thread-2579 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., hostname=0e7930017ff8,37961,1732148819586, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:47,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:47,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149047475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:47,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:47,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149047476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:47,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:47,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149047476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:47,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-21T00:29:47,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:47,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149047489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:47,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/b050acbbf28f4f62ae2bb4a982a0e0d8 2024-11-21T00:29:47,605 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:47,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-21T00:29:47,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:47,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:47,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/940fa2bacbc748a5b1924016479bf11e is 50, key is test_row_0/C:col10/1732148985717/Put/seqid=0 2024-11-21T00:29:47,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742505_1681 (size=12301) 2024-11-21T00:29:47,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/940fa2bacbc748a5b1924016479bf11e 2024-11-21T00:29:47,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/64e698b5fc1348d395f8c28ab31761df as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/64e698b5fc1348d395f8c28ab31761df 2024-11-21T00:29:47,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/64e698b5fc1348d395f8c28ab31761df, entries=150, sequenceid=343, filesize=30.5 K 2024-11-21T00:29:47,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/b050acbbf28f4f62ae2bb4a982a0e0d8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/b050acbbf28f4f62ae2bb4a982a0e0d8 2024-11-21T00:29:47,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/b050acbbf28f4f62ae2bb4a982a0e0d8, entries=150, sequenceid=343, filesize=12.0 K 2024-11-21T00:29:47,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/940fa2bacbc748a5b1924016479bf11e as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/940fa2bacbc748a5b1924016479bf11e 2024-11-21T00:29:47,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/940fa2bacbc748a5b1924016479bf11e, entries=150, sequenceid=343, filesize=12.0 K 2024-11-21T00:29:47,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for dd3ebec8f640ec7aa187d6eb7b835b19 in 1328ms, sequenceid=343, compaction requested=true 2024-11-21T00:29:47,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:47,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:47,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:47,669 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:47,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:47,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:47,669 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:47,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:47,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:47,670 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:47,670 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/B is initiating minor compaction (all files) 2024-11-21T00:29:47,670 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/B in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
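The entries above show the flush completing, compaction marks being added for stores A, B and C, and the ExploringCompactionPolicy selecting all three eligible flush files of store B for a minor compaction. As a hedged illustration of the knobs behind that selection, the sketch below sets the standard file-count thresholds and queues a compaction of the same store through the public Admin API; the table and family names come from the log, the threshold values are illustrative only, and this is not the code path the test itself uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // A store becomes eligible for minor compaction once it holds at least
    // this many files (the log shows 3 files selected); values illustrative.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Explicitly queue a compaction of family B of TestAcidGuarantees,
      // the same store the log shows being compacted automatically.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("B"));
    }
  }
}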
2024-11-21T00:29:47,670 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c8bbccd0407434dac84aaeaf19c5c95, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/8a7eb9cbb21f44a98fa66af438efa883, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/b050acbbf28f4f62ae2bb4a982a0e0d8] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=36.7 K 2024-11-21T00:29:47,671 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:47,671 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/A is initiating minor compaction (all files) 2024-11-21T00:29:47,671 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/A in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,671 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5e0a5e94ff4e477189a3a5e0c334f7d1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/dcf89d0a2830464ea9c6628109e5501c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/64e698b5fc1348d395f8c28ab31761df] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=92.3 K 2024-11-21T00:29:47,671 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,671 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5e0a5e94ff4e477189a3a5e0c334f7d1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/dcf89d0a2830464ea9c6628109e5501c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/64e698b5fc1348d395f8c28ab31761df] 2024-11-21T00:29:47,671 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c8bbccd0407434dac84aaeaf19c5c95, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732148984432 2024-11-21T00:29:47,671 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e0a5e94ff4e477189a3a5e0c334f7d1, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732148984432 2024-11-21T00:29:47,671 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a7eb9cbb21f44a98fa66af438efa883, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732148985067 2024-11-21T00:29:47,672 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcf89d0a2830464ea9c6628109e5501c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732148985067 2024-11-21T00:29:47,672 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting b050acbbf28f4f62ae2bb4a982a0e0d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732148985717 2024-11-21T00:29:47,672 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64e698b5fc1348d395f8c28ab31761df, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732148985717 2024-11-21T00:29:47,676 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:47,678 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#B#compaction#576 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:47,679 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/6fc21e894bc2484eb062ae572d44e1a0 is 50, key is test_row_0/B:col10/1732148985717/Put/seqid=0 2024-11-21T00:29:47,687 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121cbb80def9cf14408adde83e221edd1a6_dd3ebec8f640ec7aa187d6eb7b835b19 store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:47,689 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121cbb80def9cf14408adde83e221edd1a6_dd3ebec8f640ec7aa187d6eb7b835b19, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:47,689 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121cbb80def9cf14408adde83e221edd1a6_dd3ebec8f640ec7aa187d6eb7b835b19 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:47,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742506_1682 (size=13119) 2024-11-21T00:29:47,701 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/6fc21e894bc2484eb062ae572d44e1a0 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6fc21e894bc2484eb062ae572d44e1a0 2024-11-21T00:29:47,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742507_1683 (size=4469) 2024-11-21T00:29:47,716 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/B of dd3ebec8f640ec7aa187d6eb7b835b19 into 6fc21e894bc2484eb062ae572d44e1a0(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
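Store A runs through DefaultMobStoreCompactor because the test declares it as a MOB-enabled column family; in this pass the temporary MOB writer is aborted because no cell crossed the MOB threshold. A minimal sketch of declaring such a family is below; the 100 KB threshold is an assumption for illustration, not necessarily the value this test configures.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static TableDescriptor descriptor() {
    // Values larger than the MOB threshold in family A are written to
    // separate MOB files, which is why its flushes and compactions go
    // through the MOB code path seen in the log.
    ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(100 * 1024L) // illustrative threshold, in bytes
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(familyA)
        .build();
  }
}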
2024-11-21T00:29:47,716 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:47,716 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/B, priority=13, startTime=1732148987669; duration=0sec 2024-11-21T00:29:47,716 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:47,716 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:B 2024-11-21T00:29:47,716 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:47,717 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:47,717 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/C is initiating minor compaction (all files) 2024-11-21T00:29:47,717 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/C in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,717 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c1c76b2d4b8a4a6f8e9bf666b2caff62, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/d7fa1d6cc520413b82ba9b0010dbae7b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/940fa2bacbc748a5b1924016479bf11e] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=36.7 K 2024-11-21T00:29:47,717 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting c1c76b2d4b8a4a6f8e9bf666b2caff62, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732148984432 2024-11-21T00:29:47,718 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting d7fa1d6cc520413b82ba9b0010dbae7b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732148985067 2024-11-21T00:29:47,718 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 940fa2bacbc748a5b1924016479bf11e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732148985717 2024-11-21T00:29:47,723 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
dd3ebec8f640ec7aa187d6eb7b835b19#C#compaction#577 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:47,724 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/19f2ffd1f7764e3b8eb718a5e5891e93 is 50, key is test_row_0/C:col10/1732148985717/Put/seqid=0 2024-11-21T00:29:47,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742508_1684 (size=13119) 2024-11-21T00:29:47,757 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:47,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-21T00:29:47,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:47,758 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-21T00:29:47,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:47,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:47,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:47,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:47,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:47,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:47,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121b8a482dd62e7410084f668f7e334749d_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148986357/Put/seqid=0 2024-11-21T00:29:47,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742509_1685 (size=12454) 
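The FlushRegionCallable dispatches above are the region-server side of a master-driven flush procedure (pid=172): earlier attempts were rejected with "Unable to complete flush" while the region was already flushing, and this dispatch finally snapshots all three column families of the CompactingMemStore. From a client, the same procedure is started with the Admin flush API; a minimal sketch under that assumption:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush the table; it runs a flush procedure that
      // dispatches FlushRegionCallable to each region server, matching the
      // RS_FLUSH_REGIONS / pid=172 activity recorded above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}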
2024-11-21T00:29:47,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:47,801 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121b8a482dd62e7410084f668f7e334749d_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b8a482dd62e7410084f668f7e334749d_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:47,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5f1781dce5594cef8adbd7e93453e775, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:47,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5f1781dce5594cef8adbd7e93453e775 is 175, key is test_row_0/A:col10/1732148986357/Put/seqid=0 2024-11-21T00:29:47,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742510_1686 (size=31255) 2024-11-21T00:29:48,120 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#A#compaction#575 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:48,121 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/31afaa405ee940ba8184b7003e8ea02f is 175, key is test_row_0/A:col10/1732148985717/Put/seqid=0 2024-11-21T00:29:48,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742511_1687 (size=32073) 2024-11-21T00:29:48,140 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/31afaa405ee940ba8184b7003e8ea02f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/31afaa405ee940ba8184b7003e8ea02f 2024-11-21T00:29:48,141 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/19f2ffd1f7764e3b8eb718a5e5891e93 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/19f2ffd1f7764e3b8eb718a5e5891e93 2024-11-21T00:29:48,156 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/C of dd3ebec8f640ec7aa187d6eb7b835b19 into 19f2ffd1f7764e3b8eb718a5e5891e93(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:48,156 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:48,156 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/C, priority=13, startTime=1732148987669; duration=0sec 2024-11-21T00:29:48,156 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:48,156 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:C 2024-11-21T00:29:48,156 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/A of dd3ebec8f640ec7aa187d6eb7b835b19 into 31afaa405ee940ba8184b7003e8ea02f(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
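Throughout this section the region server rejects puts with RegionTooBusyException because the region's memstore has exceeded its blocking size (512 K here; that limit is derived from the configured memstore flush size and the hbase.hregion.memstore.block.multiplier setting, which this test keeps deliberately small), and RpcRetryingCallerImpl on the client simply retries, as in the "tries=6, retries=16" entry earlier. Below is a minimal client-side sketch of the properties that govern that retry loop; the key names are standard HBase client settings, the values are illustrative, and the row and column chosen mirror the ones in the log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Retry budget used by RpcRetryingCallerImpl; retries=16 matches the log.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setInt("hbase.client.pause", 100); // base pause (ms), backed off per attempt

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // RegionTooBusyException is retried inside table.put(); it only
        // surfaces to the caller once the retry budget is exhausted.
        table.put(put);
      } catch (IOException e) {
        // e.g. RetriesExhaustedWithDetailsException after all retries fail
      }
    }
  }
}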
2024-11-21T00:29:48,156 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:48,156 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/A, priority=13, startTime=1732148987669; duration=0sec 2024-11-21T00:29:48,157 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:48,157 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:A 2024-11-21T00:29:48,223 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5f1781dce5594cef8adbd7e93453e775 2024-11-21T00:29:48,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/bb3759e7d02a4e8e8e16c394cb889c42 is 50, key is test_row_0/B:col10/1732148986357/Put/seqid=0 2024-11-21T00:29:48,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742512_1688 (size=12301) 2024-11-21T00:29:48,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-21T00:29:48,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:48,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:48,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149048524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149048524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149048529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149048529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149048634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149048634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149048634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149048634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,640 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/bb3759e7d02a4e8e8e16c394cb889c42 2024-11-21T00:29:48,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/923996423349499ca2b355f08d2d609f is 50, key is test_row_0/C:col10/1732148986357/Put/seqid=0 2024-11-21T00:29:48,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742513_1689 (size=12301) 2024-11-21T00:29:48,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149048839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149048839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149048840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:48,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:48,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149048842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,061 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/923996423349499ca2b355f08d2d609f 2024-11-21T00:29:49,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5f1781dce5594cef8adbd7e93453e775 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f1781dce5594cef8adbd7e93453e775 2024-11-21T00:29:49,068 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f1781dce5594cef8adbd7e93453e775, entries=150, sequenceid=364, filesize=30.5 K 2024-11-21T00:29:49,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/bb3759e7d02a4e8e8e16c394cb889c42 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/bb3759e7d02a4e8e8e16c394cb889c42 2024-11-21T00:29:49,078 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/bb3759e7d02a4e8e8e16c394cb889c42, entries=150, sequenceid=364, filesize=12.0 K 2024-11-21T00:29:49,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/923996423349499ca2b355f08d2d609f as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/923996423349499ca2b355f08d2d609f 2024-11-21T00:29:49,101 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/923996423349499ca2b355f08d2d609f, entries=150, sequenceid=364, filesize=12.0 K 2024-11-21T00:29:49,102 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for dd3ebec8f640ec7aa187d6eb7b835b19 in 1344ms, sequenceid=364, compaction requested=false 2024-11-21T00:29:49,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:49,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:49,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-21T00:29:49,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-21T00:29:49,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-21T00:29:49,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7270 sec 2024-11-21T00:29:49,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 2.7300 sec 2024-11-21T00:29:49,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-21T00:29:49,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:49,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:49,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:49,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:49,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:49,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-21T00:29:49,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:49,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112175c4d98641f34674a91180d2a2eb3b52_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148988515/Put/seqid=0 2024-11-21T00:29:49,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742514_1690 (size=12454) 2024-11-21T00:29:49,181 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:49,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149049183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149049184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149049188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149049188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,203 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112175c4d98641f34674a91180d2a2eb3b52_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112175c4d98641f34674a91180d2a2eb3b52_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:49,204 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5ff545d015be421cbefb2349a9edb830, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:49,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5ff545d015be421cbefb2349a9edb830 is 175, key is test_row_0/A:col10/1732148988515/Put/seqid=0 2024-11-21T00:29:49,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742515_1691 (size=31255) 2024-11-21T00:29:49,235 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=384, memsize=33.5 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5ff545d015be421cbefb2349a9edb830 2024-11-21T00:29:49,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/7d1a99bf330f484dadb669059083c251 is 50, key is test_row_0/B:col10/1732148988515/Put/seqid=0 2024-11-21T00:29:49,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742516_1692 (size=12301) 2024-11-21T00:29:49,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149049289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149049289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149049304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149049304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149049491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149049491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149049506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149049507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/7d1a99bf330f484dadb669059083c251 2024-11-21T00:29:49,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/17abe04185354984b89143fa9e36bd44 is 50, key is test_row_0/C:col10/1732148988515/Put/seqid=0 2024-11-21T00:29:49,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742517_1693 (size=12301) 2024-11-21T00:29:49,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149049793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149049793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149049808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:49,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:49,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149049811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/17abe04185354984b89143fa9e36bd44 2024-11-21T00:29:50,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5ff545d015be421cbefb2349a9edb830 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5ff545d015be421cbefb2349a9edb830 2024-11-21T00:29:50,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5ff545d015be421cbefb2349a9edb830, entries=150, sequenceid=384, filesize=30.5 K 2024-11-21T00:29:50,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/7d1a99bf330f484dadb669059083c251 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7d1a99bf330f484dadb669059083c251 2024-11-21T00:29:50,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7d1a99bf330f484dadb669059083c251, entries=150, sequenceid=384, filesize=12.0 K 2024-11-21T00:29:50,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/17abe04185354984b89143fa9e36bd44 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/17abe04185354984b89143fa9e36bd44 2024-11-21T00:29:50,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/17abe04185354984b89143fa9e36bd44, entries=150, sequenceid=384, filesize=12.0 K 2024-11-21T00:29:50,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for dd3ebec8f640ec7aa187d6eb7b835b19 in 974ms, sequenceid=384, compaction requested=true 2024-11-21T00:29:50,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:50,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:50,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:50,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:50,118 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:50,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:50,118 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:50,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:50,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:50,128 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:50,128 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/B is initiating minor compaction (all files) 2024-11-21T00:29:50,128 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/B in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
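The RegionTooBusyException entries above all report "Over memstore limit=512.0 K": HRegion.checkResources rejects writes once a region's memstore grows past the configured flush size multiplied by the blocking multiplier, and the flush that just completed (~100.63 KB in 974ms) is what relieves that pressure. A minimal Java sketch of how the blocking threshold is derived from the standard settings (the keys are the stock ones; the inference that this test lowered the flush size to roughly 128 KB is an assumption read off the 512.0 K figure, not taken from this run's hbase-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Stock defaults: 128 MB flush size, blocking multiplier 4.
            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
            int blockMultiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            // checkResources throws RegionTooBusyException once the region's memstore
            // exceeds flushSize * blockMultiplier; a 512 K limit with multiplier 4
            // implies a ~128 KB flush size (assumed test override).
            System.out.println("blocking memstore limit = " + (flushSize * blockMultiplier) + " bytes");
        }
    }

On the client side this exception is retriable, so the writers in the test back off and retry under the usual client retry settings rather than failing outright.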
2024-11-21T00:29:50,128 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6fc21e894bc2484eb062ae572d44e1a0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/bb3759e7d02a4e8e8e16c394cb889c42, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7d1a99bf330f484dadb669059083c251] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=36.8 K 2024-11-21T00:29:50,129 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fc21e894bc2484eb062ae572d44e1a0, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732148985717 2024-11-21T00:29:50,129 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting bb3759e7d02a4e8e8e16c394cb889c42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732148986357 2024-11-21T00:29:50,130 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d1a99bf330f484dadb669059083c251, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1732148988515 2024-11-21T00:29:50,131 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94583 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:50,131 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/A is initiating minor compaction (all files) 2024-11-21T00:29:50,131 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/A in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
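The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and ExploringCompactionPolicy lines above reflect the standard store-file thresholds: a store becomes eligible for minor compaction once it accumulates the minimum file count, and flushes start being held back at the blocking file count. A small sketch of reading those knobs (the values shown are the stock defaults, not read from this run's configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionKnobs {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible store files before a minor compaction is requested (default 3).
            int compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
            // Store file count at which further flushes are delayed (default 16), matching
            // the "16 blocking" figure logged by SortedCompactionPolicy above.
            int blockingStoreFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
            System.out.println("compact at " + compactionThreshold + " files, block at " + blockingStoreFiles);
        }
    }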
2024-11-21T00:29:50,131 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/31afaa405ee940ba8184b7003e8ea02f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f1781dce5594cef8adbd7e93453e775, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5ff545d015be421cbefb2349a9edb830] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=92.4 K 2024-11-21T00:29:50,131 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:50,131 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/31afaa405ee940ba8184b7003e8ea02f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f1781dce5594cef8adbd7e93453e775, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5ff545d015be421cbefb2349a9edb830] 2024-11-21T00:29:50,131 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31afaa405ee940ba8184b7003e8ea02f, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732148985717 2024-11-21T00:29:50,132 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f1781dce5594cef8adbd7e93453e775, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732148986357 2024-11-21T00:29:50,132 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ff545d015be421cbefb2349a9edb830, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1732148988515 2024-11-21T00:29:50,149 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#B#compaction#584 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:50,150 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:50,151 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/3b40a733756d44b8b41d7db20aad94b6 is 50, key is test_row_0/B:col10/1732148988515/Put/seqid=0 2024-11-21T00:29:50,155 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411215d7fcc4d4ff04ce09171bf22276e9845_dd3ebec8f640ec7aa187d6eb7b835b19 store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:50,157 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411215d7fcc4d4ff04ce09171bf22276e9845_dd3ebec8f640ec7aa187d6eb7b835b19, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:50,157 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411215d7fcc4d4ff04ce09171bf22276e9845_dd3ebec8f640ec7aa187d6eb7b835b19 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:50,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742519_1695 (size=4469) 2024-11-21T00:29:50,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742518_1694 (size=13221) 2024-11-21T00:29:50,217 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/3b40a733756d44b8b41d7db20aad94b6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3b40a733756d44b8b41d7db20aad94b6 2024-11-21T00:29:50,228 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/B of dd3ebec8f640ec7aa187d6eb7b835b19 into 3b40a733756d44b8b41d7db20aad94b6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
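The DefaultMobStoreCompactor entries above, including the MOB writer that is created and then aborted "because there are no MOB cells", indicate that column family A is MOB-enabled but that no cell in these files exceeded the MOB threshold. A hedged sketch of how such a family is declared with the 2.x descriptor builders (the 100 KB threshold is illustrative, not the value this test uses):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
        // Cells larger than the MOB threshold are written to separate MOB files; smaller
        // cells stay in ordinary store files, which is why the compactor above ends up
        // aborting its MOB writer when it finds nothing to move.
        public static ColumnFamilyDescriptor mobFamily() {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100 * 1024L) // illustrative threshold, not this test's setting
                .build();
        }
    }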
2024-11-21T00:29:50,228 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:50,228 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/B, priority=13, startTime=1732148990118; duration=0sec 2024-11-21T00:29:50,228 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:50,228 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:B 2024-11-21T00:29:50,228 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T00:29:50,231 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T00:29:50,231 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/C is initiating minor compaction (all files) 2024-11-21T00:29:50,231 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/C in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:50,231 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/19f2ffd1f7764e3b8eb718a5e5891e93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/923996423349499ca2b355f08d2d609f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/17abe04185354984b89143fa9e36bd44] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=36.8 K 2024-11-21T00:29:50,251 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 19f2ffd1f7764e3b8eb718a5e5891e93, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732148985717 2024-11-21T00:29:50,252 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 923996423349499ca2b355f08d2d609f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732148986357 2024-11-21T00:29:50,252 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 17abe04185354984b89143fa9e36bd44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1732148988515 2024-11-21T00:29:50,270 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
dd3ebec8f640ec7aa187d6eb7b835b19#C#compaction#586 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:50,270 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/769ce07b6aad42f795acf356eb13f6e3 is 50, key is test_row_0/C:col10/1732148988515/Put/seqid=0 2024-11-21T00:29:50,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742520_1696 (size=13221) 2024-11-21T00:29:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:50,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-21T00:29:50,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:50,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:50,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:50,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:50,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:50,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:50,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411211ca3afa383564db3a6eb4c5b13ea0eb6_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148990298/Put/seqid=0 2024-11-21T00:29:50,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742521_1697 (size=14994) 2024-11-21T00:29:50,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149050322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149050324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149050324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149050325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149050426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149050428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149050429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149050430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-21T00:29:50,488 INFO [Thread-2589 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-21T00:29:50,489 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-21T00:29:50,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-21T00:29:50,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-21T00:29:50,491 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-21T00:29:50,491 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T00:29:50,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T00:29:50,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-21T00:29:50,591 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#A#compaction#585 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:50,592 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/b4b57f81e83542f58882605c8576aab8 is 175, key is test_row_0/A:col10/1732148988515/Put/seqid=0 2024-11-21T00:29:50,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742522_1698 (size=32175) 2024-11-21T00:29:50,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149050630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149050633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149050634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,643 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-21T00:29:50,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149050641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:50,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:50,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:50,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:50,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:50,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:50,683 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/769ce07b6aad42f795acf356eb13f6e3 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/769ce07b6aad42f795acf356eb13f6e3 2024-11-21T00:29:50,687 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/C of dd3ebec8f640ec7aa187d6eb7b835b19 into 769ce07b6aad42f795acf356eb13f6e3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:50,687 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:50,687 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/C, priority=13, startTime=1732148990118; duration=0sec 2024-11-21T00:29:50,687 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:50,687 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:C 2024-11-21T00:29:50,711 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:50,717 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411211ca3afa383564db3a6eb4c5b13ea0eb6_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411211ca3afa383564db3a6eb4c5b13ea0eb6_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:50,718 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/c2939dd928104000b1949eb7b18d9246, store: 
[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:50,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/c2939dd928104000b1949eb7b18d9246 is 175, key is test_row_0/A:col10/1732148990298/Put/seqid=0 2024-11-21T00:29:50,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742523_1699 (size=39949) 2024-11-21T00:29:50,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-21T00:29:50,797 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-21T00:29:50,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:50,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:50,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:50,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
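The FLUSH procedure traffic above (pid=173 FlushTableProcedure, subprocedure pid=174 FlushRegionProcedure, and the FlushRegionCallable failing with "NOT flushing ... as already flushing") is a client-requested table flush racing with the memstore-pressure flush already in progress on the region. A minimal sketch of the client call that starts such a procedure, assuming an already-open Connection:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class FlushTableExample {
        // Asks the master to flush every region of the table; the master runs a
        // FlushTableProcedure and dispatches per-region flush work to the region servers,
        // which is the remote procedure (pid=174) seen failing and being re-dispatched above.
        static void flushTable(Connection conn) throws IOException {
            try (Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }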
2024-11-21T00:29:50,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:50,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:50,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149050933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149050937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149050937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:50,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-21T00:29:50,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:50,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:50,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:50,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:50,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:50,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:50,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:50,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149050948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,012 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/b4b57f81e83542f58882605c8576aab8 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b4b57f81e83542f58882605c8576aab8 2024-11-21T00:29:51,016 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/A of dd3ebec8f640ec7aa187d6eb7b835b19 into b4b57f81e83542f58882605c8576aab8(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:51,016 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:51,016 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/A, priority=13, startTime=1732148990118; duration=0sec 2024-11-21T00:29:51,016 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:51,016 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:A 2024-11-21T00:29:51,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-21T00:29:51,105 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-21T00:29:51,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:51,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:51,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,129 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=405, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/c2939dd928104000b1949eb7b18d9246 2024-11-21T00:29:51,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/fc5e5a2aae194717a13b56c2b1db4008 is 50, key is test_row_0/B:col10/1732148990298/Put/seqid=0 2024-11-21T00:29:51,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742524_1700 (size=12301) 2024-11-21T00:29:51,261 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-21T00:29:51,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
as already flushing 2024-11-21T00:29:51,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,415 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-21T00:29:51,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:51,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:51,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149051440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:51,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149051443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:51,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149051443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:51,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149051451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:51,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58080 deadline: 1732149051497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,498 DEBUG [Thread-2579 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8192 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., hostname=0e7930017ff8,37961,1732148819586, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:51,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/fc5e5a2aae194717a13b56c2b1db4008 2024-11-21T00:29:51,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/cec2d4860ad541668af98ba7a624a0ac is 50, key is test_row_0/C:col10/1732148990298/Put/seqid=0 2024-11-21T00:29:51,567 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-21T00:29:51,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:51,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:51,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742525_1701 (size=12301) 2024-11-21T00:29:51,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-21T00:29:51,721 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-21T00:29:51,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:51,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,721 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:51,873 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:51,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-21T00:29:51,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:51,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:51,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:29:51,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:51,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:29:51,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/cec2d4860ad541668af98ba7a624a0ac 2024-11-21T00:29:51,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/c2939dd928104000b1949eb7b18d9246 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c2939dd928104000b1949eb7b18d9246 2024-11-21T00:29:51,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c2939dd928104000b1949eb7b18d9246, entries=200, sequenceid=405, filesize=39.0 K 2024-11-21T00:29:52,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/fc5e5a2aae194717a13b56c2b1db4008 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/fc5e5a2aae194717a13b56c2b1db4008 2024-11-21T00:29:52,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/fc5e5a2aae194717a13b56c2b1db4008, entries=150, sequenceid=405, filesize=12.0 K 2024-11-21T00:29:52,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/cec2d4860ad541668af98ba7a624a0ac as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/cec2d4860ad541668af98ba7a624a0ac 2024-11-21T00:29:52,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/cec2d4860ad541668af98ba7a624a0ac, entries=150, sequenceid=405, filesize=12.0 K 2024-11-21T00:29:52,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for dd3ebec8f640ec7aa187d6eb7b835b19 in 1710ms, sequenceid=405, compaction requested=false 2024-11-21T00:29:52,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:52,025 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37961 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=174 2024-11-21T00:29:52,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:52,037 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-21T00:29:52,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:52,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:52,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:52,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:52,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:52,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:52,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121d1a9d2a1f6be415390efa4612db33ab2_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148990324/Put/seqid=0 2024-11-21T00:29:52,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742526_1702 (size=12454) 2024-11-21T00:29:52,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:52,066 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121d1a9d2a1f6be415390efa4612db33ab2_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d1a9d2a1f6be415390efa4612db33ab2_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:52,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8f8b7a22b1854c6499c8d7e4e26c4dee, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:52,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8f8b7a22b1854c6499c8d7e4e26c4dee is 175, key is test_row_0/A:col10/1732148990324/Put/seqid=0 2024-11-21T00:29:52,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742527_1703 (size=31255) 2024-11-21T00:29:52,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. as already flushing 2024-11-21T00:29:52,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:52,478 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=424, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8f8b7a22b1854c6499c8d7e4e26c4dee 2024-11-21T00:29:52,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149052479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149052479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149052480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149052481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/52134ded2b4a40cc948d7dfd4180a6c6 is 50, key is test_row_0/B:col10/1732148990324/Put/seqid=0 2024-11-21T00:29:52,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742528_1704 (size=12301) 2024-11-21T00:29:52,527 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/52134ded2b4a40cc948d7dfd4180a6c6 2024-11-21T00:29:52,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/edc326489e584aa5b4f9688ebbbb2daf is 50, key is test_row_0/C:col10/1732148990324/Put/seqid=0 2024-11-21T00:29:52,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742529_1705 (size=12301) 2024-11-21T00:29:52,577 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/edc326489e584aa5b4f9688ebbbb2daf 2024-11-21T00:29:52,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149052586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149052586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149052586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149052586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-21T00:29:52,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/8f8b7a22b1854c6499c8d7e4e26c4dee as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8f8b7a22b1854c6499c8d7e4e26c4dee 2024-11-21T00:29:52,640 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8f8b7a22b1854c6499c8d7e4e26c4dee, entries=150, sequenceid=424, filesize=30.5 K 2024-11-21T00:29:52,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/52134ded2b4a40cc948d7dfd4180a6c6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/52134ded2b4a40cc948d7dfd4180a6c6 2024-11-21T00:29:52,663 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/52134ded2b4a40cc948d7dfd4180a6c6, entries=150, sequenceid=424, filesize=12.0 K 2024-11-21T00:29:52,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/edc326489e584aa5b4f9688ebbbb2daf as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/edc326489e584aa5b4f9688ebbbb2daf 2024-11-21T00:29:52,669 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/edc326489e584aa5b4f9688ebbbb2daf, entries=150, sequenceid=424, filesize=12.0 K 2024-11-21T00:29:52,669 INFO [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for dd3ebec8f640ec7aa187d6eb7b835b19 in 632ms, sequenceid=424, compaction requested=true 2024-11-21T00:29:52,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:52,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:52,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0e7930017ff8:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-21T00:29:52,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-21T00:29:52,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-21T00:29:52,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1780 sec 2024-11-21T00:29:52,673 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 2.1830 sec 2024-11-21T00:29:52,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:52,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-21T00:29:52,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:52,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:52,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:52,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:52,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:52,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-21T00:29:52,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121c797f428c10f4b7b8ed1cc7ea68fa84d_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148992789/Put/seqid=0 2024-11-21T00:29:52,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149052807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149052808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149052808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149052810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742530_1706 (size=12454) 2024-11-21T00:29:52,824 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:52,829 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121c797f428c10f4b7b8ed1cc7ea68fa84d_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121c797f428c10f4b7b8ed1cc7ea68fa84d_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:52,830 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/89ef7f91d9454a8caa5a0b1fa7fce7ec, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:52,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/89ef7f91d9454a8caa5a0b1fa7fce7ec is 175, key is test_row_0/A:col10/1732148992789/Put/seqid=0 2024-11-21T00:29:52,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742531_1707 (size=31255) 2024-11-21T00:29:52,840 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=445, memsize=40.3 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/89ef7f91d9454a8caa5a0b1fa7fce7ec 2024-11-21T00:29:52,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/053c225d871046d4abbcbd3e0430e2f2 is 50, key is test_row_0/B:col10/1732148992789/Put/seqid=0 2024-11-21T00:29:52,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742532_1708 (size=12301) 2024-11-21T00:29:52,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149052911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149052912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149052913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:52,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:52,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149052914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:53,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149053114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:53,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149053115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:53,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:53,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149053119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:53,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:53,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149053119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:53,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/053c225d871046d4abbcbd3e0430e2f2 2024-11-21T00:29:53,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/be78ec73981147098e8394cbe5bc3fa6 is 50, key is test_row_0/C:col10/1732148992789/Put/seqid=0 2024-11-21T00:29:53,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742533_1709 (size=12301) 2024-11-21T00:29:53,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58064 deadline: 1732149053421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:53,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58034 deadline: 1732149053421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:53,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58036 deadline: 1732149053425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:53,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-21T00:29:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58046 deadline: 1732149053425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 2024-11-21T00:29:53,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/be78ec73981147098e8394cbe5bc3fa6 2024-11-21T00:29:53,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/89ef7f91d9454a8caa5a0b1fa7fce7ec as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/89ef7f91d9454a8caa5a0b1fa7fce7ec 2024-11-21T00:29:53,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/89ef7f91d9454a8caa5a0b1fa7fce7ec, entries=150, sequenceid=445, filesize=30.5 K 2024-11-21T00:29:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/053c225d871046d4abbcbd3e0430e2f2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/053c225d871046d4abbcbd3e0430e2f2 2024-11-21T00:29:53,702 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/053c225d871046d4abbcbd3e0430e2f2, entries=150, sequenceid=445, filesize=12.0 K 2024-11-21T00:29:53,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/be78ec73981147098e8394cbe5bc3fa6 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/be78ec73981147098e8394cbe5bc3fa6 2024-11-21T00:29:53,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/be78ec73981147098e8394cbe5bc3fa6, entries=150, sequenceid=445, filesize=12.0 K 2024-11-21T00:29:53,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for dd3ebec8f640ec7aa187d6eb7b835b19 in 932ms, sequenceid=445, compaction requested=true 2024-11-21T00:29:53,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:53,722 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:53,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:A, priority=-2147483648, current under compaction store size is 1 2024-11-21T00:29:53,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:53,723 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:53,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:B, priority=-2147483648, current under compaction store size is 2 2024-11-21T00:29:53,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:53,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dd3ebec8f640ec7aa187d6eb7b835b19:C, priority=-2147483648, current under compaction store size is 3 2024-11-21T00:29:53,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:53,724 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:53,724 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:53,724 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/B is initiating minor compaction (all files) 2024-11-21T00:29:53,724 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/A is initiating minor compaction (all files) 2024-11-21T00:29:53,724 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/B in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:53,724 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3b40a733756d44b8b41d7db20aad94b6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/fc5e5a2aae194717a13b56c2b1db4008, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/52134ded2b4a40cc948d7dfd4180a6c6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/053c225d871046d4abbcbd3e0430e2f2] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=48.9 K 2024-11-21T00:29:53,724 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/A in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:53,724 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b4b57f81e83542f58882605c8576aab8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c2939dd928104000b1949eb7b18d9246, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8f8b7a22b1854c6499c8d7e4e26c4dee, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/89ef7f91d9454a8caa5a0b1fa7fce7ec] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=131.5 K 2024-11-21T00:29:53,724 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:29:53,724 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
files: [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b4b57f81e83542f58882605c8576aab8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c2939dd928104000b1949eb7b18d9246, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8f8b7a22b1854c6499c8d7e4e26c4dee, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/89ef7f91d9454a8caa5a0b1fa7fce7ec] 2024-11-21T00:29:53,725 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b40a733756d44b8b41d7db20aad94b6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1732148988515 2024-11-21T00:29:53,726 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4b57f81e83542f58882605c8576aab8, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1732148988515 2024-11-21T00:29:53,726 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting fc5e5a2aae194717a13b56c2b1db4008, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732148989162 2024-11-21T00:29:53,726 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 52134ded2b4a40cc948d7dfd4180a6c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=424, earliestPutTs=1732148990318 2024-11-21T00:29:53,726 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2939dd928104000b1949eb7b18d9246, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732148989162 2024-11-21T00:29:53,735 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f8b7a22b1854c6499c8d7e4e26c4dee, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=424, earliestPutTs=1732148990318 2024-11-21T00:29:53,735 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 053c225d871046d4abbcbd3e0430e2f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732148992479 2024-11-21T00:29:53,739 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89ef7f91d9454a8caa5a0b1fa7fce7ec, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732148992479 2024-11-21T00:29:53,805 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#B#compaction#596 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:53,805 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/5581541b69334dbf90f7cd837cb23077 is 50, key is test_row_0/B:col10/1732148992789/Put/seqid=0 2024-11-21T00:29:53,811 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:53,820 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241121939187da8be64d6abe050cd2003b4279_dd3ebec8f640ec7aa187d6eb7b835b19 store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:53,822 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241121939187da8be64d6abe050cd2003b4279_dd3ebec8f640ec7aa187d6eb7b835b19, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:53,822 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121939187da8be64d6abe050cd2003b4279_dd3ebec8f640ec7aa187d6eb7b835b19 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:53,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742534_1710 (size=13357) 2024-11-21T00:29:53,850 DEBUG [Thread-2594 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fbb1399 to 127.0.0.1:64241 2024-11-21T00:29:53,850 DEBUG [Thread-2590 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d02ace0 to 127.0.0.1:64241 2024-11-21T00:29:53,850 DEBUG [Thread-2590 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:53,850 DEBUG [Thread-2594 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:53,851 DEBUG [Thread-2596 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51fccca6 to 127.0.0.1:64241 2024-11-21T00:29:53,851 DEBUG [Thread-2596 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:53,854 DEBUG [Thread-2598 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x539997ae to 127.0.0.1:64241 2024-11-21T00:29:53,854 DEBUG [Thread-2598 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:53,857 DEBUG [Thread-2592 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63054209 to 127.0.0.1:64241 2024-11-21T00:29:53,857 DEBUG [Thread-2592 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:53,857 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/5581541b69334dbf90f7cd837cb23077 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5581541b69334dbf90f7cd837cb23077 2024-11-21T00:29:53,861 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/B of dd3ebec8f640ec7aa187d6eb7b835b19 into 5581541b69334dbf90f7cd837cb23077(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:53,861 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:53,862 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/B, priority=12, startTime=1732148993723; duration=0sec 2024-11-21T00:29:53,862 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T00:29:53,862 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:B 2024-11-21T00:29:53,862 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T00:29:53,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742535_1711 (size=4469) 2024-11-21T00:29:53,865 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-21T00:29:53,865 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1540): dd3ebec8f640ec7aa187d6eb7b835b19/C is initiating minor compaction (all files) 2024-11-21T00:29:53,865 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dd3ebec8f640ec7aa187d6eb7b835b19/C in TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:29:53,866 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/769ce07b6aad42f795acf356eb13f6e3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/cec2d4860ad541668af98ba7a624a0ac, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/edc326489e584aa5b4f9688ebbbb2daf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/be78ec73981147098e8394cbe5bc3fa6] into tmpdir=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp, totalSize=48.9 K 2024-11-21T00:29:53,866 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#A#compaction#597 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:53,866 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/875a5090ba4f4527a5ef3196aa90005c is 175, key is test_row_0/A:col10/1732148992789/Put/seqid=0 2024-11-21T00:29:53,870 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting 769ce07b6aad42f795acf356eb13f6e3, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1732148988515 2024-11-21T00:29:53,872 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting cec2d4860ad541668af98ba7a624a0ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732148989162 2024-11-21T00:29:53,873 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting edc326489e584aa5b4f9688ebbbb2daf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=424, earliestPutTs=1732148990318 2024-11-21T00:29:53,874 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] compactions.Compactor(224): Compacting be78ec73981147098e8394cbe5bc3fa6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732148992479 2024-11-21T00:29:53,913 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dd3ebec8f640ec7aa187d6eb7b835b19#C#compaction#598 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T00:29:53,913 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/44467aee0df44be0a24a26092d4663f7 is 50, key is test_row_0/C:col10/1732148992789/Put/seqid=0 2024-11-21T00:29:53,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742536_1712 (size=32311) 2024-11-21T00:29:53,920 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/875a5090ba4f4527a5ef3196aa90005c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/875a5090ba4f4527a5ef3196aa90005c 2024-11-21T00:29:53,925 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/A of dd3ebec8f640ec7aa187d6eb7b835b19 into 875a5090ba4f4527a5ef3196aa90005c(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T00:29:53,925 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:53,925 INFO [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/A, priority=12, startTime=1732148993722; duration=0sec 2024-11-21T00:29:53,925 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:53,925 DEBUG [RS:0;0e7930017ff8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:A 2024-11-21T00:29:53,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8581): Flush requested on dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:53,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-21T00:29:53,928 DEBUG [Thread-2581 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3cc71f2e to 127.0.0.1:64241 2024-11-21T00:29:53,928 DEBUG [Thread-2581 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:53,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:29:53,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:53,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:29:53,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-21T00:29:53,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:29:53,929 DEBUG [Thread-2583 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79a7bd2b to 127.0.0.1:64241 2024-11-21T00:29:53,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:29:53,929 DEBUG [Thread-2583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:53,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742537_1713 (size=13357) 2024-11-21T00:29:53,933 DEBUG [Thread-2585 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d688bcb to 127.0.0.1:64241 2024-11-21T00:29:53,933 DEBUG [Thread-2587 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31f7e171 to 127.0.0.1:64241 2024-11-21T00:29:53,933 DEBUG [Thread-2585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:53,933 DEBUG [Thread-2587 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:53,939 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/44467aee0df44be0a24a26092d4663f7 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/44467aee0df44be0a24a26092d4663f7 2024-11-21T00:29:53,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411218df3e3fd141a4da3a330bce5485ee2c7_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148992806/Put/seqid=0 2024-11-21T00:29:53,961 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in dd3ebec8f640ec7aa187d6eb7b835b19/C of dd3ebec8f640ec7aa187d6eb7b835b19 into 44467aee0df44be0a24a26092d4663f7(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T00:29:53,961 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:53,961 INFO [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19., storeName=dd3ebec8f640ec7aa187d6eb7b835b19/C, priority=12, startTime=1732148993723; duration=0sec 2024-11-21T00:29:53,961 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T00:29:53,961 DEBUG [RS:0;0e7930017ff8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dd3ebec8f640ec7aa187d6eb7b835b19:C 2024-11-21T00:29:53,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742538_1714 (size=12454) 2024-11-21T00:29:54,373 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:54,375 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411218df3e3fd141a4da3a330bce5485ee2c7_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411218df3e3fd141a4da3a330bce5485ee2c7_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:29:54,376 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/bbcc82ce2e7e47638b5ff7ecf572dd8c, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:29:54,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/bbcc82ce2e7e47638b5ff7ecf572dd8c is 175, key is test_row_0/A:col10/1732148992806/Put/seqid=0 2024-11-21T00:29:54,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742539_1715 (size=31255) 2024-11-21T00:29:54,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-21T00:29:54,596 INFO [Thread-2589 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-21T00:29:54,780 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=464, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/bbcc82ce2e7e47638b5ff7ecf572dd8c 2024-11-21T00:29:54,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of 
the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/51afa2f2411741fcbbea67367f61d067 is 50, key is test_row_0/B:col10/1732148992806/Put/seqid=0 2024-11-21T00:29:54,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742540_1716 (size=12301) 2024-11-21T00:29:54,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=464 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/51afa2f2411741fcbbea67367f61d067 2024-11-21T00:29:54,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/1cf04e3fd6a8471ba69d98b51968c2a2 is 50, key is test_row_0/C:col10/1732148992806/Put/seqid=0 2024-11-21T00:29:54,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742541_1717 (size=12301) 2024-11-21T00:29:55,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=464 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/1cf04e3fd6a8471ba69d98b51968c2a2 2024-11-21T00:29:55,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/bbcc82ce2e7e47638b5ff7ecf572dd8c as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/bbcc82ce2e7e47638b5ff7ecf572dd8c 2024-11-21T00:29:55,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/bbcc82ce2e7e47638b5ff7ecf572dd8c, entries=150, sequenceid=464, filesize=30.5 K 2024-11-21T00:29:55,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/51afa2f2411741fcbbea67367f61d067 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/51afa2f2411741fcbbea67367f61d067 2024-11-21T00:29:55,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/51afa2f2411741fcbbea67367f61d067, entries=150, sequenceid=464, filesize=12.0 K 2024-11-21T00:29:55,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/1cf04e3fd6a8471ba69d98b51968c2a2 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/1cf04e3fd6a8471ba69d98b51968c2a2 2024-11-21T00:29:55,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/1cf04e3fd6a8471ba69d98b51968c2a2, entries=150, sequenceid=464, filesize=12.0 K 2024-11-21T00:29:55,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=13.42 KB/13740 for dd3ebec8f640ec7aa187d6eb7b835b19 in 1296ms, sequenceid=464, compaction requested=false 2024-11-21T00:29:55,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:29:57,299 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:30:01,590 DEBUG [Thread-2579 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65f51785 to 127.0.0.1:64241 2024-11-21T00:30:01,590 DEBUG [Thread-2579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:30:01,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 42 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3889 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3720 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3651 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3839 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3763 2024-11-21T00:30:01,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-21T00:30:01,591 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-21T00:30:01,591 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2089b1f4 to 127.0.0.1:64241 2024-11-21T00:30:01,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:30:01,591 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-21T00:30:01,592 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-21T00:30:01,593 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-21T00:30:01,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-21T00:30:01,595 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732149001595"}]},"ts":"1732149001595"} 2024-11-21T00:30:01,596 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-21T00:30:01,619 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-21T00:30:01,619 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-21T00:30:01,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=176, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, UNASSIGN}] 2024-11-21T00:30:01,621 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=177, ppid=176, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, UNASSIGN 2024-11-21T00:30:01,621 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=177 updating hbase:meta row=dd3ebec8f640ec7aa187d6eb7b835b19, regionState=CLOSING, regionLocation=0e7930017ff8,37961,1732148819586 2024-11-21T00:30:01,622 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T00:30:01,622 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; CloseRegionProcedure dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586}] 2024-11-21T00:30:01,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-21T00:30:01,773 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0e7930017ff8,37961,1732148819586 2024-11-21T00:30:01,774 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:01,774 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-21T00:30:01,774 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing dd3ebec8f640ec7aa187d6eb7b835b19, disabling compactions & flushes 2024-11-21T00:30:01,774 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 
2024-11-21T00:30:01,774 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:30:01,774 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. after waiting 0 ms 2024-11-21T00:30:01,774 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:30:01,774 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(2837): Flushing dd3ebec8f640ec7aa187d6eb7b835b19 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-21T00:30:01,774 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=A 2024-11-21T00:30:01,774 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:30:01,774 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=B 2024-11-21T00:30:01,774 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:30:01,774 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dd3ebec8f640ec7aa187d6eb7b835b19, store=C 2024-11-21T00:30:01,775 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-21T00:30:01,779 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121d8b489c7a19c41a6b8713d25baa5662d_dd3ebec8f640ec7aa187d6eb7b835b19 is 50, key is test_row_0/A:col10/1732148993932/Put/seqid=0 2024-11-21T00:30:01,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742542_1718 (size=12454) 2024-11-21T00:30:01,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-21T00:30:02,184 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:30:02,187 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241121d8b489c7a19c41a6b8713d25baa5662d_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d8b489c7a19c41a6b8713d25baa5662d_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:02,188 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5f765af2d6ff4910bb9e5503a19eec02, store: [table=TestAcidGuarantees family=A region=dd3ebec8f640ec7aa187d6eb7b835b19] 2024-11-21T00:30:02,189 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5f765af2d6ff4910bb9e5503a19eec02 is 175, key is test_row_0/A:col10/1732148993932/Put/seqid=0 2024-11-21T00:30:02,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742543_1719 (size=31255) 2024-11-21T00:30:02,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-21T00:30:02,592 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=471, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5f765af2d6ff4910bb9e5503a19eec02 2024-11-21T00:30:02,598 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/a8b864d6f5a9484d8022ae58b5a34901 is 50, key is test_row_0/B:col10/1732148993932/Put/seqid=0 2024-11-21T00:30:02,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742544_1720 (size=12301) 2024-11-21T00:30:02,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-21T00:30:03,004 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/a8b864d6f5a9484d8022ae58b5a34901 2024-11-21T00:30:03,009 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/8023bc1b4a6140d59b5bce74d0d76134 is 50, key is test_row_0/C:col10/1732148993932/Put/seqid=0 2024-11-21T00:30:03,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742545_1721 (size=12301) 2024-11-21T00:30:03,416 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/8023bc1b4a6140d59b5bce74d0d76134 2024-11-21T00:30:03,419 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/A/5f765af2d6ff4910bb9e5503a19eec02 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f765af2d6ff4910bb9e5503a19eec02 2024-11-21T00:30:03,422 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f765af2d6ff4910bb9e5503a19eec02, entries=150, sequenceid=471, filesize=30.5 K 2024-11-21T00:30:03,422 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/B/a8b864d6f5a9484d8022ae58b5a34901 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/a8b864d6f5a9484d8022ae58b5a34901 2024-11-21T00:30:03,425 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/a8b864d6f5a9484d8022ae58b5a34901, entries=150, sequenceid=471, filesize=12.0 K 2024-11-21T00:30:03,425 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/.tmp/C/8023bc1b4a6140d59b5bce74d0d76134 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/8023bc1b4a6140d59b5bce74d0d76134 2024-11-21T00:30:03,428 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/8023bc1b4a6140d59b5bce74d0d76134, entries=150, sequenceid=471, filesize=12.0 K 2024-11-21T00:30:03,429 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for dd3ebec8f640ec7aa187d6eb7b835b19 in 1655ms, sequenceid=471, compaction requested=true 2024-11-21T00:30:03,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/f7745ef404204b1caf4ce131214c0eef, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c93c85d2bf32448098ce87406a4d25df, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/d2dc478cef734ea9963f13cd3bdfdb77, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c08c55aca11048debcaff1dc4b32a4b1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8c307c47d4d64b099348d8fb860e1d9b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/29aed4ee3d604f18b89d8686b0fc9582, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/adf847e2881b43d0944fd9c2d333e30d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8e72debc654f4aae9b1f7a2b92235dee, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/4f9adc1bfec24856b88034c8ad18f996, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/244df12bc8074116b21bb27c18c0c274, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e79a9765e968403eade36e08118964ff, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b17c35b70ebb49ef9eced82d561df309, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e041f3b0cc2a425ab354974b18631cf1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/09638a375a884938b91f8aec7ed5edd1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/92f680fe9a86492b911f7558dff82828, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e30904b3d98c4d2dbb825964ddc50614, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/0703ce62599a45d2be11b1739d2467d8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/1d868957572a415a941da92c1ef2c458, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/9cfc72c0a475409b8074125bfb58b384, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/a6bf789230eb453aafe809420d9b59a5, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5e0a5e94ff4e477189a3a5e0c334f7d1, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/3c8733706a9d454396d42789e76c4f74, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/dcf89d0a2830464ea9c6628109e5501c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/31afaa405ee940ba8184b7003e8ea02f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/64e698b5fc1348d395f8c28ab31761df, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f1781dce5594cef8adbd7e93453e775, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b4b57f81e83542f58882605c8576aab8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5ff545d015be421cbefb2349a9edb830, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c2939dd928104000b1949eb7b18d9246, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8f8b7a22b1854c6499c8d7e4e26c4dee, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/89ef7f91d9454a8caa5a0b1fa7fce7ec] to archive 2024-11-21T00:30:03,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-21T00:30:03,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/f7745ef404204b1caf4ce131214c0eef to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/f7745ef404204b1caf4ce131214c0eef 2024-11-21T00:30:03,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c93c85d2bf32448098ce87406a4d25df to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c93c85d2bf32448098ce87406a4d25df 2024-11-21T00:30:03,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/d2dc478cef734ea9963f13cd3bdfdb77 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/d2dc478cef734ea9963f13cd3bdfdb77 2024-11-21T00:30:03,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c08c55aca11048debcaff1dc4b32a4b1 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c08c55aca11048debcaff1dc4b32a4b1 2024-11-21T00:30:03,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8c307c47d4d64b099348d8fb860e1d9b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8c307c47d4d64b099348d8fb860e1d9b 2024-11-21T00:30:03,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/29aed4ee3d604f18b89d8686b0fc9582 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/29aed4ee3d604f18b89d8686b0fc9582 2024-11-21T00:30:03,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/adf847e2881b43d0944fd9c2d333e30d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/adf847e2881b43d0944fd9c2d333e30d 2024-11-21T00:30:03,438 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8e72debc654f4aae9b1f7a2b92235dee to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8e72debc654f4aae9b1f7a2b92235dee 2024-11-21T00:30:03,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/4f9adc1bfec24856b88034c8ad18f996 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/4f9adc1bfec24856b88034c8ad18f996 2024-11-21T00:30:03,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/244df12bc8074116b21bb27c18c0c274 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/244df12bc8074116b21bb27c18c0c274 2024-11-21T00:30:03,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e79a9765e968403eade36e08118964ff to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e79a9765e968403eade36e08118964ff 2024-11-21T00:30:03,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b17c35b70ebb49ef9eced82d561df309 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b17c35b70ebb49ef9eced82d561df309 2024-11-21T00:30:03,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e041f3b0cc2a425ab354974b18631cf1 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e041f3b0cc2a425ab354974b18631cf1 2024-11-21T00:30:03,449 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/09638a375a884938b91f8aec7ed5edd1 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/09638a375a884938b91f8aec7ed5edd1 2024-11-21T00:30:03,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/92f680fe9a86492b911f7558dff82828 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/92f680fe9a86492b911f7558dff82828 2024-11-21T00:30:03,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e30904b3d98c4d2dbb825964ddc50614 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/e30904b3d98c4d2dbb825964ddc50614 2024-11-21T00:30:03,452 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/0703ce62599a45d2be11b1739d2467d8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/0703ce62599a45d2be11b1739d2467d8 2024-11-21T00:30:03,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/1d868957572a415a941da92c1ef2c458 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/1d868957572a415a941da92c1ef2c458 2024-11-21T00:30:03,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/9cfc72c0a475409b8074125bfb58b384 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/9cfc72c0a475409b8074125bfb58b384 2024-11-21T00:30:03,454 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/a6bf789230eb453aafe809420d9b59a5 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/a6bf789230eb453aafe809420d9b59a5 2024-11-21T00:30:03,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5e0a5e94ff4e477189a3a5e0c334f7d1 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5e0a5e94ff4e477189a3a5e0c334f7d1 2024-11-21T00:30:03,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/3c8733706a9d454396d42789e76c4f74 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/3c8733706a9d454396d42789e76c4f74 2024-11-21T00:30:03,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/dcf89d0a2830464ea9c6628109e5501c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/dcf89d0a2830464ea9c6628109e5501c 2024-11-21T00:30:03,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/31afaa405ee940ba8184b7003e8ea02f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/31afaa405ee940ba8184b7003e8ea02f 2024-11-21T00:30:03,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/64e698b5fc1348d395f8c28ab31761df to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/64e698b5fc1348d395f8c28ab31761df 2024-11-21T00:30:03,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f1781dce5594cef8adbd7e93453e775 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f1781dce5594cef8adbd7e93453e775 2024-11-21T00:30:03,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b4b57f81e83542f58882605c8576aab8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/b4b57f81e83542f58882605c8576aab8 2024-11-21T00:30:03,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5ff545d015be421cbefb2349a9edb830 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5ff545d015be421cbefb2349a9edb830 2024-11-21T00:30:03,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c2939dd928104000b1949eb7b18d9246 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/c2939dd928104000b1949eb7b18d9246 2024-11-21T00:30:03,462 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8f8b7a22b1854c6499c8d7e4e26c4dee to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/8f8b7a22b1854c6499c8d7e4e26c4dee 2024-11-21T00:30:03,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/89ef7f91d9454a8caa5a0b1fa7fce7ec to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/89ef7f91d9454a8caa5a0b1fa7fce7ec 2024-11-21T00:30:03,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f3e62dacdd504bdbb88449f31b286b3c, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/ff58de0bae044831b113bb048c86f499, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/846dbce7bd27463a9e24119056e62c08, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6b124858d73e4230ad0d0a95aeaa4082, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6d3fbf475b9f4ceda44a341b2e357d4b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7435a7408d874d64b7ff3fffc2a7f3e6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4e33ccdb19bc45608b7f0c3696474a1c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2d5ad2c14e81492eadc6e6f2fe8d7cda, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/38729516b2874b98bf7738d5a2d1a930, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/621dcaa494634f21bd7b9c33a7fffa79, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/53a7625f6e504c948bb3ea9e3199937b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/e84f647806e74f98b20189f7e304465b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/0b39a5b87bbf4d7abafc1dc0a4267a69, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5169f4d5a5794991bdfbe4c3ba7f2b6d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f308d5f2e6a24c7db5d027ec3245e26c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c364b4417544b19906c962fefb336f8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2e50d77664c643a7ba5043d9402c022b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/80fc00ce2db34a98a2f1e0469689bbac, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4c3f9426c8934814adae8b30bef31d5d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5cd1b5cf56284ab8822b484add208218, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c8bbccd0407434dac84aaeaf19c5c95, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/113a81f1ab074513bea7549edf773d8d, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/8a7eb9cbb21f44a98fa66af438efa883, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6fc21e894bc2484eb062ae572d44e1a0, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/b050acbbf28f4f62ae2bb4a982a0e0d8, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/bb3759e7d02a4e8e8e16c394cb889c42, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3b40a733756d44b8b41d7db20aad94b6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7d1a99bf330f484dadb669059083c251, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/fc5e5a2aae194717a13b56c2b1db4008, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/52134ded2b4a40cc948d7dfd4180a6c6, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/053c225d871046d4abbcbd3e0430e2f2] to archive 2024-11-21T00:30:03,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-21T00:30:03,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f3e62dacdd504bdbb88449f31b286b3c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f3e62dacdd504bdbb88449f31b286b3c 2024-11-21T00:30:03,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/ff58de0bae044831b113bb048c86f499 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/ff58de0bae044831b113bb048c86f499 2024-11-21T00:30:03,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/846dbce7bd27463a9e24119056e62c08 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/846dbce7bd27463a9e24119056e62c08 2024-11-21T00:30:03,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6b124858d73e4230ad0d0a95aeaa4082 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6b124858d73e4230ad0d0a95aeaa4082 2024-11-21T00:30:03,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6d3fbf475b9f4ceda44a341b2e357d4b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6d3fbf475b9f4ceda44a341b2e357d4b 2024-11-21T00:30:03,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7435a7408d874d64b7ff3fffc2a7f3e6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7435a7408d874d64b7ff3fffc2a7f3e6 2024-11-21T00:30:03,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4e33ccdb19bc45608b7f0c3696474a1c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4e33ccdb19bc45608b7f0c3696474a1c 2024-11-21T00:30:03,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2d5ad2c14e81492eadc6e6f2fe8d7cda to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2d5ad2c14e81492eadc6e6f2fe8d7cda 2024-11-21T00:30:03,472 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/38729516b2874b98bf7738d5a2d1a930 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/38729516b2874b98bf7738d5a2d1a930 2024-11-21T00:30:03,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/621dcaa494634f21bd7b9c33a7fffa79 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/621dcaa494634f21bd7b9c33a7fffa79 2024-11-21T00:30:03,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/53a7625f6e504c948bb3ea9e3199937b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/53a7625f6e504c948bb3ea9e3199937b 2024-11-21T00:30:03,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/e84f647806e74f98b20189f7e304465b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/e84f647806e74f98b20189f7e304465b 2024-11-21T00:30:03,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/0b39a5b87bbf4d7abafc1dc0a4267a69 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/0b39a5b87bbf4d7abafc1dc0a4267a69 2024-11-21T00:30:03,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5169f4d5a5794991bdfbe4c3ba7f2b6d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5169f4d5a5794991bdfbe4c3ba7f2b6d 2024-11-21T00:30:03,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f308d5f2e6a24c7db5d027ec3245e26c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/f308d5f2e6a24c7db5d027ec3245e26c 2024-11-21T00:30:03,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c364b4417544b19906c962fefb336f8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c364b4417544b19906c962fefb336f8 2024-11-21T00:30:03,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2e50d77664c643a7ba5043d9402c022b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/2e50d77664c643a7ba5043d9402c022b 2024-11-21T00:30:03,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/80fc00ce2db34a98a2f1e0469689bbac to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/80fc00ce2db34a98a2f1e0469689bbac 2024-11-21T00:30:03,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4c3f9426c8934814adae8b30bef31d5d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/4c3f9426c8934814adae8b30bef31d5d 2024-11-21T00:30:03,479 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5cd1b5cf56284ab8822b484add208218 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5cd1b5cf56284ab8822b484add208218 2024-11-21T00:30:03,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c8bbccd0407434dac84aaeaf19c5c95 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3c8bbccd0407434dac84aaeaf19c5c95 2024-11-21T00:30:03,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/113a81f1ab074513bea7549edf773d8d to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/113a81f1ab074513bea7549edf773d8d 2024-11-21T00:30:03,481 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/8a7eb9cbb21f44a98fa66af438efa883 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/8a7eb9cbb21f44a98fa66af438efa883 2024-11-21T00:30:03,481 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6fc21e894bc2484eb062ae572d44e1a0 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/6fc21e894bc2484eb062ae572d44e1a0 2024-11-21T00:30:03,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/b050acbbf28f4f62ae2bb4a982a0e0d8 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/b050acbbf28f4f62ae2bb4a982a0e0d8 2024-11-21T00:30:03,483 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/bb3759e7d02a4e8e8e16c394cb889c42 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/bb3759e7d02a4e8e8e16c394cb889c42 2024-11-21T00:30:03,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3b40a733756d44b8b41d7db20aad94b6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/3b40a733756d44b8b41d7db20aad94b6 2024-11-21T00:30:03,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7d1a99bf330f484dadb669059083c251 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/7d1a99bf330f484dadb669059083c251 2024-11-21T00:30:03,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/fc5e5a2aae194717a13b56c2b1db4008 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/fc5e5a2aae194717a13b56c2b1db4008 2024-11-21T00:30:03,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/52134ded2b4a40cc948d7dfd4180a6c6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/52134ded2b4a40cc948d7dfd4180a6c6 2024-11-21T00:30:03,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/053c225d871046d4abbcbd3e0430e2f2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/053c225d871046d4abbcbd3e0430e2f2 2024-11-21T00:30:03,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7a4a7db6182a4e809b3096ba20fa3ee7, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/198495984d5741bcaf3477e374f2413b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e457bfdf6c384cd1bc358f4977cd1feb, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/43ae9e7a5e014415b341b81d739609fc, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c45b46c4674a4c51af0f51436b4904af, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/45fe840fdfc3487da2e551b280d4088a, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/6330a4c1c29f4cb4b085f4bb3062f76c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c12655c2e85c43a4b4611219b65adb2c, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/adfa1f2578d844488d165e4218871361, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e79feb3262f84cfab9d8c2fcedbcbf29, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/921e90e7569a4e599845f1ae2f78de8b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/45f6801f484745f49b5d040f3bfb5768, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4fa4ec090eea4296a86b3bea99d425f9, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/945d9a9d5874432ca741e4a221a4a970, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/ca7d7bb0ee334c2cb05425706d4ab23e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/9f87a2492d4f49a39dec927cb6a3d7ff, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/a11fabf469c644d5a9a1ee772e50f026, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7f155040de1b4748b2ac98e0d42a4d74, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c5c6fabc059a4bd79b9f243d61523008, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4936bc7ecbc04e248a0b0469555c607a, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c1c76b2d4b8a4a6f8e9bf666b2caff62, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/20ff658f24364fa28fa7fdac5447d761, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/d7fa1d6cc520413b82ba9b0010dbae7b, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/19f2ffd1f7764e3b8eb718a5e5891e93, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/940fa2bacbc748a5b1924016479bf11e, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/923996423349499ca2b355f08d2d609f, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/769ce07b6aad42f795acf356eb13f6e3, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/17abe04185354984b89143fa9e36bd44, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/cec2d4860ad541668af98ba7a624a0ac, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/edc326489e584aa5b4f9688ebbbb2daf, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/be78ec73981147098e8394cbe5bc3fa6] to archive 2024-11-21T00:30:03,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
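The HFileArchiver entries above record that, when the store closes, compacted store files are not deleted in place: each one is renamed from the region's data/ directory to the mirrored path under archive/, preserving the table/region/family layout. What follows is only a rough sketch of that move using the plain Hadoop FileSystem API with hypothetical paths; it is not the actual HFileArchiver code, which among other things also handles retries and name collisions.

    // Rough sketch, not HFileArchiver itself: move one store file from
    // data/... to the mirrored location under archive/.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.io.IOException;

    public class ArchiveMoveSketch {
      // Derive archive/<relative path> from data/<relative path>.
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String relative = storeFile.toUri().getPath().replaceFirst(".*/data/", "data/");
        return new Path(new Path(rootDir, "archive"), relative);
      }

      static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        Path dest = toArchivePath(rootDir, storeFile);
        fs.mkdirs(dest.getParent());        // make sure the archive directory exists
        if (!fs.rename(storeFile, dest)) {  // move, do not copy and delete
          throw new IOException("Failed to archive " + storeFile + " to " + dest);
        }
      }

      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Both arguments are hypothetical, e.g. the test-data root and one HFile path.
        archiveStoreFile(fs, new Path(args[0]), new Path(args[1]));
      }
    }

A rename on HDFS is a metadata-only operation, so archiving a large store is cheap; the files then remain under archive/ until the master's cleaner chores decide they are no longer referenced and remove them.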
2024-11-21T00:30:03,489 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7a4a7db6182a4e809b3096ba20fa3ee7 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7a4a7db6182a4e809b3096ba20fa3ee7 2024-11-21T00:30:03,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/198495984d5741bcaf3477e374f2413b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/198495984d5741bcaf3477e374f2413b 2024-11-21T00:30:03,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e457bfdf6c384cd1bc358f4977cd1feb to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e457bfdf6c384cd1bc358f4977cd1feb 2024-11-21T00:30:03,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/43ae9e7a5e014415b341b81d739609fc to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/43ae9e7a5e014415b341b81d739609fc 2024-11-21T00:30:03,493 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c45b46c4674a4c51af0f51436b4904af to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c45b46c4674a4c51af0f51436b4904af 2024-11-21T00:30:03,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/45fe840fdfc3487da2e551b280d4088a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/45fe840fdfc3487da2e551b280d4088a 2024-11-21T00:30:03,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/6330a4c1c29f4cb4b085f4bb3062f76c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/6330a4c1c29f4cb4b085f4bb3062f76c 2024-11-21T00:30:03,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c12655c2e85c43a4b4611219b65adb2c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c12655c2e85c43a4b4611219b65adb2c 2024-11-21T00:30:03,496 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/adfa1f2578d844488d165e4218871361 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/adfa1f2578d844488d165e4218871361 2024-11-21T00:30:03,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e79feb3262f84cfab9d8c2fcedbcbf29 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/e79feb3262f84cfab9d8c2fcedbcbf29 2024-11-21T00:30:03,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/921e90e7569a4e599845f1ae2f78de8b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/921e90e7569a4e599845f1ae2f78de8b 2024-11-21T00:30:03,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/45f6801f484745f49b5d040f3bfb5768 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/45f6801f484745f49b5d040f3bfb5768 2024-11-21T00:30:03,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4fa4ec090eea4296a86b3bea99d425f9 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4fa4ec090eea4296a86b3bea99d425f9 2024-11-21T00:30:03,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/945d9a9d5874432ca741e4a221a4a970 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/945d9a9d5874432ca741e4a221a4a970 2024-11-21T00:30:03,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/ca7d7bb0ee334c2cb05425706d4ab23e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/ca7d7bb0ee334c2cb05425706d4ab23e 2024-11-21T00:30:03,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/9f87a2492d4f49a39dec927cb6a3d7ff to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/9f87a2492d4f49a39dec927cb6a3d7ff 2024-11-21T00:30:03,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/a11fabf469c644d5a9a1ee772e50f026 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/a11fabf469c644d5a9a1ee772e50f026 2024-11-21T00:30:03,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7f155040de1b4748b2ac98e0d42a4d74 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/7f155040de1b4748b2ac98e0d42a4d74 2024-11-21T00:30:03,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c5c6fabc059a4bd79b9f243d61523008 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c5c6fabc059a4bd79b9f243d61523008 2024-11-21T00:30:03,505 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4936bc7ecbc04e248a0b0469555c607a to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/4936bc7ecbc04e248a0b0469555c607a 2024-11-21T00:30:03,506 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c1c76b2d4b8a4a6f8e9bf666b2caff62 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/c1c76b2d4b8a4a6f8e9bf666b2caff62 2024-11-21T00:30:03,508 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/20ff658f24364fa28fa7fdac5447d761 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/20ff658f24364fa28fa7fdac5447d761 2024-11-21T00:30:03,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/d7fa1d6cc520413b82ba9b0010dbae7b to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/d7fa1d6cc520413b82ba9b0010dbae7b 2024-11-21T00:30:03,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/19f2ffd1f7764e3b8eb718a5e5891e93 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/19f2ffd1f7764e3b8eb718a5e5891e93 2024-11-21T00:30:03,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/940fa2bacbc748a5b1924016479bf11e to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/940fa2bacbc748a5b1924016479bf11e 2024-11-21T00:30:03,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/923996423349499ca2b355f08d2d609f to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/923996423349499ca2b355f08d2d609f 2024-11-21T00:30:03,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/769ce07b6aad42f795acf356eb13f6e3 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/769ce07b6aad42f795acf356eb13f6e3 2024-11-21T00:30:03,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/17abe04185354984b89143fa9e36bd44 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/17abe04185354984b89143fa9e36bd44 2024-11-21T00:30:03,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/cec2d4860ad541668af98ba7a624a0ac to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/cec2d4860ad541668af98ba7a624a0ac 2024-11-21T00:30:03,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/edc326489e584aa5b4f9688ebbbb2daf to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/edc326489e584aa5b4f9688ebbbb2daf 2024-11-21T00:30:03,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/be78ec73981147098e8394cbe5bc3fa6 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/be78ec73981147098e8394cbe5bc3fa6 2024-11-21T00:30:03,520 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/recovered.edits/474.seqid, newMaxSeqId=474, maxSeqId=4 2024-11-21T00:30:03,521 INFO 
[RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19. 2024-11-21T00:30:03,521 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for dd3ebec8f640ec7aa187d6eb7b835b19: 2024-11-21T00:30:03,522 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,522 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=177 updating hbase:meta row=dd3ebec8f640ec7aa187d6eb7b835b19, regionState=CLOSED 2024-11-21T00:30:03,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-21T00:30:03,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; CloseRegionProcedure dd3ebec8f640ec7aa187d6eb7b835b19, server=0e7930017ff8,37961,1732148819586 in 1.9010 sec 2024-11-21T00:30:03,524 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=176 2024-11-21T00:30:03,524 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=176, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=dd3ebec8f640ec7aa187d6eb7b835b19, UNASSIGN in 1.9030 sec 2024-11-21T00:30:03,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-21T00:30:03,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9060 sec 2024-11-21T00:30:03,526 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732149003526"}]},"ts":"1732149003526"} 2024-11-21T00:30:03,526 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-21T00:30:03,536 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-21T00:30:03,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9450 sec 2024-11-21T00:30:03,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-21T00:30:03,699 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-21T00:30:03,699 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-21T00:30:03,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:30:03,700 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:30:03,701 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-21T00:30:03,701 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:30:03,701 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,703 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C, FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/recovered.edits] 2024-11-21T00:30:03,705 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f765af2d6ff4910bb9e5503a19eec02 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/5f765af2d6ff4910bb9e5503a19eec02 2024-11-21T00:30:03,705 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/875a5090ba4f4527a5ef3196aa90005c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/875a5090ba4f4527a5ef3196aa90005c 2024-11-21T00:30:03,706 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/bbcc82ce2e7e47638b5ff7ecf572dd8c to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/A/bbcc82ce2e7e47638b5ff7ecf572dd8c 2024-11-21T00:30:03,708 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/51afa2f2411741fcbbea67367f61d067 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/51afa2f2411741fcbbea67367f61d067 2024-11-21T00:30:03,709 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5581541b69334dbf90f7cd837cb23077 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/5581541b69334dbf90f7cd837cb23077 2024-11-21T00:30:03,709 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/a8b864d6f5a9484d8022ae58b5a34901 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/B/a8b864d6f5a9484d8022ae58b5a34901 2024-11-21T00:30:03,710 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/1cf04e3fd6a8471ba69d98b51968c2a2 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/1cf04e3fd6a8471ba69d98b51968c2a2 2024-11-21T00:30:03,711 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/44467aee0df44be0a24a26092d4663f7 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/44467aee0df44be0a24a26092d4663f7 2024-11-21T00:30:03,712 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/8023bc1b4a6140d59b5bce74d0d76134 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/C/8023bc1b4a6140d59b5bce74d0d76134 2024-11-21T00:30:03,713 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/recovered.edits/474.seqid to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19/recovered.edits/474.seqid 2024-11-21T00:30:03,713 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/default/TestAcidGuarantees/dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,713 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-21T00:30:03,714 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-21T00:30:03,714 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-21T00:30:03,716 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112102ecc70b234446b5be4dc5efc433ab50_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112102ecc70b234446b5be4dc5efc433ab50_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,717 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411211ca3afa383564db3a6eb4c5b13ea0eb6_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411211ca3afa383564db3a6eb4c5b13ea0eb6_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,717 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112128f16192dabc42efac85d44ae739e723_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112128f16192dabc42efac85d44ae739e723_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,718 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112135867b3ae4494636ab8a68dd7caa1b49_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112135867b3ae4494636ab8a68dd7caa1b49_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,719 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411213d8dff9c68ba40e3b2c3244b428d2db6_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411213d8dff9c68ba40e3b2c3244b428d2db6_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,719 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112158ed329f8fc0493f802963cb92e81355_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112158ed329f8fc0493f802963cb92e81355_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,720 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411215901e1339300440383f0841b0eee311e_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411215901e1339300440383f0841b0eee311e_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,721 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411215c950a82175b4637848bd87af2bd3043_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411215c950a82175b4637848bd87af2bd3043_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,721 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121654de37b803c45fd912740dbddc4e739_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121654de37b803c45fd912740dbddc4e739_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,722 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411216a516165d18545b6851474735b6fcdf5_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411216a516165d18545b6851474735b6fcdf5_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,723 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411216c43c4593c154b94afc2cf6c732ae0ab_dd3ebec8f640ec7aa187d6eb7b835b19 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411216c43c4593c154b94afc2cf6c732ae0ab_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,723 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112175c4d98641f34674a91180d2a2eb3b52_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112175c4d98641f34674a91180d2a2eb3b52_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,724 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112180d377a196cb42459bf069e72746e56a_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112180d377a196cb42459bf069e72746e56a_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,725 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112188e0391ab0844e3b86f55dfec0410932_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112188e0391ab0844e3b86f55dfec0410932_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,725 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411218df3e3fd141a4da3a330bce5485ee2c7_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411218df3e3fd141a4da3a330bce5485ee2c7_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,726 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b007c7bafc834397be15e733cbfcf183_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b007c7bafc834397be15e733cbfcf183_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,727 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b8a482dd62e7410084f668f7e334749d_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121b8a482dd62e7410084f668f7e334749d_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,728 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121c2f1d26c273e4099a3a7a2b85cb38f83_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121c2f1d26c273e4099a3a7a2b85cb38f83_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,729 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121c797f428c10f4b7b8ed1cc7ea68fa84d_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121c797f428c10f4b7b8ed1cc7ea68fa84d_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,730 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d1a9d2a1f6be415390efa4612db33ab2_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d1a9d2a1f6be415390efa4612db33ab2_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,730 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d321c411e4c044a2a261e1d438379b3e_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d321c411e4c044a2a261e1d438379b3e_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,731 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d8b489c7a19c41a6b8713d25baa5662d_dd3ebec8f640ec7aa187d6eb7b835b19 to 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121d8b489c7a19c41a6b8713d25baa5662d_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,732 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121df3415c780e1437481a925bb71f4eaf7_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121df3415c780e1437481a925bb71f4eaf7_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,732 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121fc213ed2cabf427bacbaa7abb187665c_dd3ebec8f640ec7aa187d6eb7b835b19 to hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241121fc213ed2cabf427bacbaa7abb187665c_dd3ebec8f640ec7aa187d6eb7b835b19 2024-11-21T00:30:03,733 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-21T00:30:03,734 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:30:03,736 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-21T00:30:03,738 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-21T00:30:03,739 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:30:03,739 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-21T00:30:03,739 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732149003739"}]},"ts":"9223372036854775807"} 2024-11-21T00:30:03,740 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-21T00:30:03,740 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => dd3ebec8f640ec7aa187d6eb7b835b19, NAME => 'TestAcidGuarantees,,1732148970587.dd3ebec8f640ec7aa187d6eb7b835b19.', STARTKEY => '', ENDKEY => ''}] 2024-11-21T00:30:03,740 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
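The procedures traced above (DisableTableProcedure as pid=175, then DeleteTableProcedure as pid=179) are the server-side halves of two ordinary Admin calls from the test client. A hedged sketch of that client side follows; only the table name is taken from the log, and the connection boilerplate is generic rather than lifted from this test class.

    // Sketch of the client calls behind the DISABLE/DELETE procedures above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);  // submits a DisableTableProcedure and waits for it
          }
          admin.deleteTable(tn);     // submits a DeleteTableProcedure and waits for it
        }
      }
    }

Both calls block until the master reports the procedure done, which is why the RPC handler in the log keeps answering "Checking to see if procedure is done" for pid=175 and pid=179 before the client logs each operation as completed.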
2024-11-21T00:30:03,741 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732149003740"}]},"ts":"9223372036854775807"} 2024-11-21T00:30:03,742 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-21T00:30:03,754 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-21T00:30:03,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 55 msec 2024-11-21T00:30:03,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35089 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-21T00:30:03,801 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-21T00:30:03,809 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=236 (was 240), OpenFileDescriptor=447 (was 455), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=855 (was 836) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3112 (was 2260) - AvailableMemoryMB LEAK? - 2024-11-21T00:30:03,810 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-21T00:30:03,810 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-21T00:30:03,810 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560d619d to 127.0.0.1:64241 2024-11-21T00:30:03,810 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:30:03,810 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:30:03,810 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1160287858, stopped=false 2024-11-21T00:30:03,810 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=0e7930017ff8,35089,1732148818012 2024-11-21T00:30:03,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T00:30:03,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T00:30:03,820 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-21T00:30:03,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:30:03,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:30:03,820 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-21T00:30:03,820 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '0e7930017ff8,37961,1732148819586' ***** 2024-11-21T00:30:03,820 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-21T00:30:03,820 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T00:30:03,820 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T00:30:03,821 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:30:03,821 INFO [RS:0;0e7930017ff8:37961 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:30:03,821 INFO [RS:0;0e7930017ff8:37961 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:30:03,821 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-21T00:30:03,821 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(3579): Received CLOSE for 3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:30:03,821 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1224): stopping server 0e7930017ff8,37961,1732148819586 2024-11-21T00:30:03,821 DEBUG [RS:0;0e7930017ff8:37961 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:30:03,821 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:30:03,822 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:30:03,822 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:30:03,822 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-21T00:30:03,822 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3572e63abe2094af1c626c3e96fc06ec, disabling compactions & flushes 2024-11-21T00:30:03,822 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 2024-11-21T00:30:03,822 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 2024-11-21T00:30:03,822 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. after waiting 0 ms 2024-11-21T00:30:03,822 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-21T00:30:03,822 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 
2024-11-21T00:30:03,822 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1603): Online Regions={3572e63abe2094af1c626c3e96fc06ec=hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:30:03,822 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 3572e63abe2094af1c626c3e96fc06ec 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-21T00:30:03,822 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:30:03,822 INFO [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-21T00:30:03,822 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-21T00:30:03,822 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:30:03,822 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:30:03,822 INFO [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-21T00:30:03,824 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:30:03,830 INFO [regionserver/0e7930017ff8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:30:03,830 INFO [regionserver/0e7930017ff8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:30:03,837 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/namespace/3572e63abe2094af1c626c3e96fc06ec/.tmp/info/a0a8b2f5630c42d18c1c349fe9b2ead5 is 45, key is default/info:d/1732148825382/Put/seqid=0 2024-11-21T00:30:03,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742546_1722 (size=5037) 2024-11-21T00:30:03,846 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/.tmp/info/633ff9c9b4d144328b82067fdc3ec550 is 143, key is hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec./info:regioninfo/1732148825160/Put/seqid=0 2024-11-21T00:30:03,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742547_1723 (size=7725) 2024-11-21T00:30:03,856 INFO [regionserver/0e7930017ff8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:30:04,025 DEBUG [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:30:04,225 DEBUG 
[RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3572e63abe2094af1c626c3e96fc06ec 2024-11-21T00:30:04,240 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/namespace/3572e63abe2094af1c626c3e96fc06ec/.tmp/info/a0a8b2f5630c42d18c1c349fe9b2ead5 2024-11-21T00:30:04,243 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/namespace/3572e63abe2094af1c626c3e96fc06ec/.tmp/info/a0a8b2f5630c42d18c1c349fe9b2ead5 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/namespace/3572e63abe2094af1c626c3e96fc06ec/info/a0a8b2f5630c42d18c1c349fe9b2ead5 2024-11-21T00:30:04,245 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/namespace/3572e63abe2094af1c626c3e96fc06ec/info/a0a8b2f5630c42d18c1c349fe9b2ead5, entries=2, sequenceid=6, filesize=4.9 K 2024-11-21T00:30:04,245 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 3572e63abe2094af1c626c3e96fc06ec in 423ms, sequenceid=6, compaction requested=false 2024-11-21T00:30:04,248 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/namespace/3572e63abe2094af1c626c3e96fc06ec/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T00:30:04,248 INFO [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 2024-11-21T00:30:04,248 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3572e63abe2094af1c626c3e96fc06ec: 2024-11-21T00:30:04,248 DEBUG [RS_CLOSE_REGION-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732148824254.3572e63abe2094af1c626c3e96fc06ec. 
2024-11-21T00:30:04,249 INFO [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/.tmp/info/633ff9c9b4d144328b82067fdc3ec550 2024-11-21T00:30:04,286 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/.tmp/rep_barrier/b296c5cc30d04cf79ac10d3c2ff75bf6 is 102, key is TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7./rep_barrier:/1732148858027/DeleteFamily/seqid=0 2024-11-21T00:30:04,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742548_1724 (size=6025) 2024-11-21T00:30:04,304 INFO [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/.tmp/rep_barrier/b296c5cc30d04cf79ac10d3c2ff75bf6 2024-11-21T00:30:04,324 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/.tmp/table/a2ca1bd2db9e451981b9afca01ecbe2a is 96, key is TestAcidGuarantees,,1732148825888.6103bc2a66018bd699c0a8ab668a67b7./table:/1732148858027/DeleteFamily/seqid=0 2024-11-21T00:30:04,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742549_1725 (size=5942) 2024-11-21T00:30:04,328 INFO [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/.tmp/table/a2ca1bd2db9e451981b9afca01ecbe2a 2024-11-21T00:30:04,331 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/.tmp/info/633ff9c9b4d144328b82067fdc3ec550 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/info/633ff9c9b4d144328b82067fdc3ec550 2024-11-21T00:30:04,334 INFO [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/info/633ff9c9b4d144328b82067fdc3ec550, entries=22, sequenceid=93, filesize=7.5 K 2024-11-21T00:30:04,334 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/.tmp/rep_barrier/b296c5cc30d04cf79ac10d3c2ff75bf6 as 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/rep_barrier/b296c5cc30d04cf79ac10d3c2ff75bf6 2024-11-21T00:30:04,337 INFO [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/rep_barrier/b296c5cc30d04cf79ac10d3c2ff75bf6, entries=6, sequenceid=93, filesize=5.9 K 2024-11-21T00:30:04,338 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/.tmp/table/a2ca1bd2db9e451981b9afca01ecbe2a as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/table/a2ca1bd2db9e451981b9afca01ecbe2a 2024-11-21T00:30:04,341 INFO [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/table/a2ca1bd2db9e451981b9afca01ecbe2a, entries=9, sequenceid=93, filesize=5.8 K 2024-11-21T00:30:04,342 INFO [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 520ms, sequenceid=93, compaction requested=false 2024-11-21T00:30:04,347 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-21T00:30:04,348 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:30:04,348 INFO [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-21T00:30:04,348 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-21T00:30:04,348 DEBUG [RS_CLOSE_META-regionserver/0e7930017ff8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:30:04,425 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1250): stopping server 0e7930017ff8,37961,1732148819586; all regions closed. 
2024-11-21T00:30:04,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741834_1010 (size=26050) 2024-11-21T00:30:04,430 DEBUG [RS:0;0e7930017ff8:37961 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/oldWALs 2024-11-21T00:30:04,430 INFO [RS:0;0e7930017ff8:37961 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 0e7930017ff8%2C37961%2C1732148819586.meta:.meta(num 1732148823615) 2024-11-21T00:30:04,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741833_1009 (size=16499533) 2024-11-21T00:30:04,434 DEBUG [RS:0;0e7930017ff8:37961 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/oldWALs 2024-11-21T00:30:04,434 INFO [RS:0;0e7930017ff8:37961 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 0e7930017ff8%2C37961%2C1732148819586:(num 1732148823100) 2024-11-21T00:30:04,434 DEBUG [RS:0;0e7930017ff8:37961 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:30:04,434 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:30:04,434 INFO [RS:0;0e7930017ff8:37961 {}] hbase.ChoreService(370): Chore service for: regionserver/0e7930017ff8:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T00:30:04,434 INFO [regionserver/0e7930017ff8:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-21T00:30:04,434 INFO [RS:0;0e7930017ff8:37961 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:37961 2024-11-21T00:30:04,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0e7930017ff8,37961,1732148819586 2024-11-21T00:30:04,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T00:30:04,453 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0e7930017ff8,37961,1732148819586] 2024-11-21T00:30:04,453 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 0e7930017ff8,37961,1732148819586; numProcessing=1 2024-11-21T00:30:04,461 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/0e7930017ff8,37961,1732148819586 already deleted, retry=false 2024-11-21T00:30:04,461 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 0e7930017ff8,37961,1732148819586 expired; onlineServers=0 2024-11-21T00:30:04,461 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '0e7930017ff8,35089,1732148818012' ***** 2024-11-21T00:30:04,462 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:30:04,462 DEBUG [M:0;0e7930017ff8:35089 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50441ae3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0e7930017ff8/172.17.0.2:0 2024-11-21T00:30:04,462 INFO [M:0;0e7930017ff8:35089 {}] regionserver.HRegionServer(1224): stopping server 0e7930017ff8,35089,1732148818012 2024-11-21T00:30:04,462 INFO [M:0;0e7930017ff8:35089 {}] regionserver.HRegionServer(1250): stopping server 0e7930017ff8,35089,1732148818012; all regions closed. 2024-11-21T00:30:04,462 DEBUG [M:0;0e7930017ff8:35089 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:30:04,462 DEBUG [M:0;0e7930017ff8:35089 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:30:04,462 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-21T00:30:04,462 DEBUG [M:0;0e7930017ff8:35089 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:30:04,462 DEBUG [master/0e7930017ff8:0:becomeActiveMaster-HFileCleaner.large.0-1732148822446 {}] cleaner.HFileCleaner(306): Exit Thread[master/0e7930017ff8:0:becomeActiveMaster-HFileCleaner.large.0-1732148822446,5,FailOnTimeoutGroup] 2024-11-21T00:30:04,462 DEBUG [master/0e7930017ff8:0:becomeActiveMaster-HFileCleaner.small.0-1732148822462 {}] cleaner.HFileCleaner(306): Exit Thread[master/0e7930017ff8:0:becomeActiveMaster-HFileCleaner.small.0-1732148822462,5,FailOnTimeoutGroup] 2024-11-21T00:30:04,462 INFO [M:0;0e7930017ff8:35089 {}] hbase.ChoreService(370): Chore service for: master/0e7930017ff8:0 had [] on shutdown 2024-11-21T00:30:04,463 DEBUG [M:0;0e7930017ff8:35089 {}] master.HMaster(1733): Stopping service threads 2024-11-21T00:30:04,463 INFO [M:0;0e7930017ff8:35089 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:30:04,463 ERROR [M:0;0e7930017ff8:35089 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-21T00:30:04,464 INFO [M:0;0e7930017ff8:35089 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:30:04,464 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-21T00:30:04,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-21T00:30:04,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T00:30:04,470 DEBUG [M:0;0e7930017ff8:35089 {}] zookeeper.ZKUtil(347): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-21T00:30:04,470 WARN [M:0;0e7930017ff8:35089 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:30:04,470 INFO [M:0;0e7930017ff8:35089 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-21T00:30:04,470 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T00:30:04,470 INFO [M:0;0e7930017ff8:35089 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:30:04,470 DEBUG [M:0;0e7930017ff8:35089 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:30:04,470 INFO [M:0;0e7930017ff8:35089 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:30:04,470 DEBUG [M:0;0e7930017ff8:35089 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:30:04,470 DEBUG [M:0;0e7930017ff8:35089 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:30:04,470 DEBUG [M:0;0e7930017ff8:35089 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:30:04,470 INFO [M:0;0e7930017ff8:35089 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=784.58 KB heapSize=966.12 KB 2024-11-21T00:30:04,484 DEBUG [M:0;0e7930017ff8:35089 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/52d803b4c73945ceb16d348a36e07248 is 82, key is hbase:meta,,1/info:regioninfo/1732148823968/Put/seqid=0 2024-11-21T00:30:04,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742550_1726 (size=5672) 2024-11-21T00:30:04,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:30:04,553 INFO [RS:0;0e7930017ff8:37961 {}] regionserver.HRegionServer(1307): Exiting; stopping=0e7930017ff8,37961,1732148819586; zookeeper connection closed. 
2024-11-21T00:30:04,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1015c13103f0001, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:30:04,554 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@57dd6532 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@57dd6532 2024-11-21T00:30:04,554 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:30:04,887 INFO [M:0;0e7930017ff8:35089 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2243 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/52d803b4c73945ceb16d348a36e07248 2024-11-21T00:30:04,906 DEBUG [M:0;0e7930017ff8:35089 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da025f5999a4435ba5c23ad069f20f73 is 2284, key is \x00\x00\x00\x00\x00\x00\x00\x9C/proc:d/1732148973365/Put/seqid=0 2024-11-21T00:30:04,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742551_1727 (size=45043) 2024-11-21T00:30:05,310 INFO [M:0;0e7930017ff8:35089 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=784.03 KB at sequenceid=2243 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da025f5999a4435ba5c23ad069f20f73 2024-11-21T00:30:05,313 INFO [M:0;0e7930017ff8:35089 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for da025f5999a4435ba5c23ad069f20f73 2024-11-21T00:30:05,330 DEBUG [M:0;0e7930017ff8:35089 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3416cc2a81ab4183ba0b82f78999925b is 69, key is 0e7930017ff8,37961,1732148819586/rs:state/1732148822616/Put/seqid=0 2024-11-21T00:30:05,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073742552_1728 (size=5156) 2024-11-21T00:30:05,734 INFO [M:0;0e7930017ff8:35089 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2243 (bloomFilter=true), to=hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3416cc2a81ab4183ba0b82f78999925b 2024-11-21T00:30:05,736 DEBUG [M:0;0e7930017ff8:35089 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/52d803b4c73945ceb16d348a36e07248 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/52d803b4c73945ceb16d348a36e07248 2024-11-21T00:30:05,739 INFO [M:0;0e7930017ff8:35089 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/52d803b4c73945ceb16d348a36e07248, entries=8, sequenceid=2243, filesize=5.5 K 2024-11-21T00:30:05,739 DEBUG [M:0;0e7930017ff8:35089 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da025f5999a4435ba5c23ad069f20f73 as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/da025f5999a4435ba5c23ad069f20f73 2024-11-21T00:30:05,741 INFO [M:0;0e7930017ff8:35089 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for da025f5999a4435ba5c23ad069f20f73 2024-11-21T00:30:05,741 INFO [M:0;0e7930017ff8:35089 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/da025f5999a4435ba5c23ad069f20f73, entries=179, sequenceid=2243, filesize=44.0 K 2024-11-21T00:30:05,742 DEBUG [M:0;0e7930017ff8:35089 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3416cc2a81ab4183ba0b82f78999925b as hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3416cc2a81ab4183ba0b82f78999925b 2024-11-21T00:30:05,744 INFO [M:0;0e7930017ff8:35089 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38105/user/jenkins/test-data/efabc81b-8af9-841e-3d49-744be5046c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3416cc2a81ab4183ba0b82f78999925b, entries=1, sequenceid=2243, filesize=5.0 K 2024-11-21T00:30:05,745 INFO [M:0;0e7930017ff8:35089 {}] regionserver.HRegion(3040): Finished flush of dataSize ~784.58 KB/803414, heapSize ~965.82 KB/989000, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1275ms, sequenceid=2243, compaction requested=false 2024-11-21T00:30:05,746 INFO [M:0;0e7930017ff8:35089 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:30:05,746 DEBUG [M:0;0e7930017ff8:35089 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-21T00:30:05,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39227 is added to blk_1073741830_1006 (size=950448) 2024-11-21T00:30:05,751 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-21T00:30:05,751 INFO [M:0;0e7930017ff8:35089 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-11-21T00:30:05,751 INFO [M:0;0e7930017ff8:35089 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35089 2024-11-21T00:30:05,786 DEBUG [M:0;0e7930017ff8:35089 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/0e7930017ff8,35089,1732148818012 already deleted, retry=false 2024-11-21T00:30:05,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:30:05,895 INFO [M:0;0e7930017ff8:35089 {}] regionserver.HRegionServer(1307): Exiting; stopping=0e7930017ff8,35089,1732148818012; zookeeper connection closed. 2024-11-21T00:30:05,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35089-0x1015c13103f0000, quorum=127.0.0.1:64241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:30:05,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bd2e890{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:30:05,903 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d3fa6ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:30:05,903 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:30:05,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63d4d645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:30:05,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57582772{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/hadoop.log.dir/,STOPPED} 2024-11-21T00:30:05,906 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:30:05,906 WARN [BP-364493626-172.17.0.2-1732148814546 heartbeating to localhost/127.0.0.1:38105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:30:05,906 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:30:05,906 WARN [BP-364493626-172.17.0.2-1732148814546 heartbeating to localhost/127.0.0.1:38105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-364493626-172.17.0.2-1732148814546 (Datanode Uuid a8cc0fd4-480a-47ac-9b45-4eb618d27036) service to localhost/127.0.0.1:38105 2024-11-21T00:30:05,908 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552/dfs/data/data1/current/BP-364493626-172.17.0.2-1732148814546 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:30:05,909 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/cluster_65bf299e-e14d-bbb0-072b-1fb2c47b9552/dfs/data/data2/current/BP-364493626-172.17.0.2-1732148814546 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:30:05,909 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:30:05,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f0d4558{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:30:05,916 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:30:05,916 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:30:05,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:30:05,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2016def-135d-91ef-deae-defe17728e56/hadoop.log.dir/,STOPPED} 2024-11-21T00:30:05,931 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-21T00:30:06,074 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down